Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 7103c72..109b01e 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/bytecode-array-builder.h"
 #include "src/compiler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
 namespace internal {
@@ -17,49 +18,75 @@
     // This helper is expected to be instantiated only when the last bytecode is
     // in the same basic block.
     DCHECK(array_builder_.LastBytecodeInSameBlock());
+    bytecode_ = Bytecodes::FromByte(
+        array_builder_.bytecodes()->at(previous_bytecode_start_));
+    operand_scale_ = OperandScale::kSingle;
+    if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
+      operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
+      bytecode_ = Bytecodes::FromByte(
+          array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
+    }
   }
 
   // Returns the previous bytecode in the same basic block.
   MUST_USE_RESULT Bytecode GetBytecode() const {
     DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    return Bytecodes::FromByte(
-        array_builder_.bytecodes()->at(previous_bytecode_start_));
+    return bytecode_;
   }
 
-  // Returns the operand at operand_index for the previous bytecode in the
-  // same basic block.
-  MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
-    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    Bytecode bytecode = GetBytecode();
-    DCHECK_GE(operand_index, 0);
-    DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
-    size_t operand_offset =
-        previous_bytecode_start_ +
-        Bytecodes::GetOperandOffset(bytecode, operand_index);
-    OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
-    switch (size) {
-      case OperandSize::kNone:
-        UNREACHABLE();
-        break;
-      case OperandSize::kByte:
-        return static_cast<uint32_t>(
-            array_builder_.bytecodes()->at(operand_offset));
-      case OperandSize::kShort:
-        uint16_t operand =
-            (array_builder_.bytecodes()->at(operand_offset) << 8) +
-            array_builder_.bytecodes()->at(operand_offset + 1);
-        return static_cast<uint32_t>(operand);
-    }
-    return 0;
+  MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
+    return Register::FromOperand(GetSignedOperand(operand_index));
+  }
+
+  MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
+    return GetUnsignedOperand(operand_index);
   }
 
   Handle<Object> GetConstantForIndexOperand(int operand_index) const {
     return array_builder_.constant_array_builder()->At(
-        GetOperand(operand_index));
+        GetIndexOperand(operand_index));
   }
 
  private:
+  // Returns the signed operand at operand_index for the previous
+  // bytecode in the same basic block.
+  MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    OperandType operand_type =
+        Bytecodes::GetOperandType(bytecode_, operand_index);
+    DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+    const uint8_t* operand_start = GetOperandStart(operand_index);
+    return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+                                          operand_scale_);
+  }
+
+  // Returns the unsigned operand at operand_index for the previous
+  // bytecode in the same basic block.
+  MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    OperandType operand_type =
+        Bytecodes::GetOperandType(bytecode_, operand_index);
+    DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+    const uint8_t* operand_start = GetOperandStart(operand_index);
+    return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+                                            operand_scale_);
+  }
+
+  const uint8_t* GetOperandStart(int operand_index) const {
+    size_t operand_offset =
+        previous_bytecode_start_ + prefix_offset() +
+        Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
+    return &(*array_builder_.bytecodes())[0] + operand_offset;
+  }
+
+  int prefix_offset() const {
+    return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
+                                                                         : 0;
+  }
+
   const BytecodeArrayBuilder& array_builder_;
+  OperandScale operand_scale_;
+  Bytecode bytecode_;
   size_t previous_bytecode_start_;
 
   DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
@@ -67,7 +94,8 @@
 
 BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
                                            int parameter_count,
-                                           int context_count, int locals_count)
+                                           int context_count, int locals_count,
+                                           FunctionLiteral* literal)
     : isolate_(isolate),
       zone_(zone),
       bytecodes_(zone),
@@ -82,11 +110,15 @@
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
-      temporary_allocator_(zone, fixed_register_count()),
-      register_translator_(this) {
+      temporary_allocator_(zone, fixed_register_count()) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
+  return_position_ =
+      literal ? std::max(literal->start_position(), literal->end_position() - 1)
+              : RelocInfo::kNoPosition;
+  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
+                               source_position_table_builder()));
 }
 
 BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
@@ -119,45 +151,49 @@
   DCHECK(exit_seen_in_block_);
 
   int bytecode_size = static_cast<int>(bytecodes_.size());
-  int register_count =
-      fixed_and_temporary_register_count() + translation_register_count();
+  int register_count = fixed_and_temporary_register_count();
   int frame_size = register_count * kPointerSize;
   Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
   Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
-  Handle<FixedArray> source_position_table =
-      source_position_table_builder()->ToFixedArray();
-  Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+  Handle<ByteArray> source_position_table =
+      source_position_table_builder()->ToSourcePositionTable();
+  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
       bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
       constant_pool);
-  output->set_handler_table(*handler_table);
-  output->set_source_position_table(*source_position_table);
+  bytecode_array->set_handler_table(*handler_table);
+  bytecode_array->set_source_position_table(*source_position_table);
+
+  void* line_info = source_position_table_builder()->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
+                               AbstractCode::cast(*bytecode_array), line_info));
+
   bytecode_generated_ = true;
-  return output;
+  return bytecode_array;
 }
 
-
 template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
+                                  OperandScale operand_scale) {
   // Don't output dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return;
-  }
+  if (exit_seen_in_block_) return;
 
   int operand_count = static_cast<int>(N);
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
 
-  int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
-  if (register_operand_count > 0) {
-    register_translator()->TranslateInputRegisters(bytecode, operands,
-                                                   operand_count);
+  last_bytecode_start_ = bytecodes()->size();
+  // Emit prefix bytecode for scale if required.
+  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+    bytecodes()->push_back(Bytecodes::ToByte(
+        Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
   }
 
-  last_bytecode_start_ = bytecodes()->size();
+  // Emit bytecode.
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+
+  // Emit operands.
   for (int i = 0; i < operand_count; i++) {
-    DCHECK(OperandIsValid(bytecode, i, operands[i]));
-    switch (Bytecodes::GetOperandSize(bytecode, i)) {
+    DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
+    switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
@@ -171,58 +207,61 @@
                             operand_bytes + 2);
         break;
       }
+      case OperandSize::kQuad: {
+        uint8_t operand_bytes[4];
+        WriteUnalignedUInt32(operand_bytes, operands[i]);
+        bytecodes()->insert(bytecodes()->end(), operand_bytes,
+                            operand_bytes + 4);
+        break;
+      }
     }
   }
-
-  if (register_operand_count > 0) {
-    register_translator()->TranslateOutputRegisters();
-  }
 }
 
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2,
-                                  uint32_t operand3) {
-  uint32_t operands[] = {operand0, operand1, operand2, operand3};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1, uint32_t operand2) {
-  uint32_t operands[] = {operand0, operand1, operand2};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
-                                  uint32_t operand1) {
-  uint32_t operands[] = {operand0, operand1};
-  Output(bytecode, operands);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
-  uint32_t operands[] = {operand0};
-  Output(bytecode, operands);
-}
-
-
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
   // Don't output dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return;
-  }
+  if (exit_seen_in_block_) return;
 
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 }
 
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1,
+                                        uint32_t operand2, uint32_t operand3) {
+  uint32_t operands[] = {operand0, operand1, operand2, operand3};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1,
+                                        uint32_t operand2) {
+  uint32_t operands[] = {operand0, operand1, operand2};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0, uint32_t operand1) {
+  uint32_t operands[] = {operand0, operand1};
+  Output(bytecode, operands, operand_scale);
+}
+
+void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
+                                        OperandScale operand_scale,
+                                        uint32_t operand0) {
+  uint32_t operands[] = {operand0};
+  Output(bytecode, operands, operand_scale);
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg) {
-  Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
+               RegisterOperand(reg));
   return *this;
 }
 
@@ -245,7 +284,9 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
                                                              Register reg) {
-  Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OutputScaled(BytecodeForCompareOperation(op), operand_scale,
+               RegisterOperand(reg));
   return *this;
 }
 
@@ -255,10 +296,11 @@
   int32_t raw_smi = smi->value();
   if (raw_smi == 0) {
     Output(Bytecode::kLdaZero);
-  } else if (raw_smi >= -128 && raw_smi <= 127) {
-    Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
   } else {
-    LoadLiteral(Handle<Object>(smi, isolate_));
+    OperandSize operand_size = SizeForSignedOperand(raw_smi);
+    OperandScale operand_scale = OperandSizesToScale(operand_size);
+    OutputScaled(Bytecode::kLdaSmi, operand_scale,
+                 SignedOperand(raw_smi, operand_size));
   }
   return *this;
 }
@@ -266,13 +308,9 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
-  if (FitsInIdx8Operand(entry)) {
-    Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
-  } else if (FitsInIdx16Operand(entry)) {
-    Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(entry));
+  OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
   return *this;
 }
 
@@ -306,20 +344,12 @@
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
-  if (value) {
-    LoadTrue();
-  } else {
-    LoadFalse();
-  }
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kLdar, reg.ToRawOperand());
+    OperandScale operand_scale =
+        OperandSizesToScale(SizeForRegisterOperand(reg));
+    OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
   }
   return *this;
 }
@@ -328,7 +358,9 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kStar, reg.ToRawOperand());
+    OperandScale operand_scale =
+        OperandSizesToScale(SizeForRegisterOperand(reg));
+    OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
   }
   return *this;
 }
@@ -337,164 +369,98 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
-    Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
-  } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
-    Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
+                                                   SizeForRegisterOperand(to));
+  OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
+               RegisterOperand(to));
   return *this;
 }
 
-void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
-                                                    Register to) {
-  // Move bytecodes modify the stack. Checking validity is an
-  // essential mitigation against corrupting the stack.
-  if (FitsInReg8OperandUntranslated(from)) {
-    CHECK(RegisterIsValid(from, OperandType::kReg8) &&
-          RegisterIsValid(to, OperandType::kReg16));
-  } else if (FitsInReg8OperandUntranslated(to)) {
-    CHECK(RegisterIsValid(from, OperandType::kReg16) &&
-          RegisterIsValid(to, OperandType::kReg8));
-  } else {
-    UNIMPLEMENTED();
-  }
-  Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
     const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
   // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
   Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index),
+                          SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index),
+                          SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index) {
-  DCHECK(slot_index >= 0);
-  if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
-           static_cast<uint8_t>(slot_index));
-  } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
-           static_cast<uint16_t>(slot_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
+               RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index) {
-  DCHECK(slot_index >= 0);
-  if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
-           static_cast<uint8_t>(slot_index));
-  } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
-           static_cast<uint16_t>(slot_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OutputScaled(Bytecode::kStaContextSlot, operand_scale,
+               RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
     const Handle<String> name, TypeofMode typeof_mode) {
   Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
                           ? Bytecode::kLdaLookupSlotInsideTypeof
                           : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index)) {
-    Output(bytecode, static_cast<uint8_t>(name_index));
-  } else if (FitsInIdx16Operand(name_index)) {
-    Output(BytecodeForWideOperands(bytecode),
-           static_cast<uint16_t>(name_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
     const Handle<String> name, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index)) {
-    Output(bytecode, static_cast<uint8_t>(name_index));
-  } else if (FitsInIdx16Operand(name_index)) {
-    Output(BytecodeForWideOperands(bytecode),
-           static_cast<uint16_t>(name_index));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kLoadIC, object.ToRawOperand(),
-           static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kLoadICWide, object.ToRawOperand(),
-           static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
+               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  if (FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -503,17 +469,11 @@
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreIC(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(name_index) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
-           static_cast<uint16_t>(name_index),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -522,15 +482,11 @@
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
-  if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
-           key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(object), SizeForRegisterOperand(key),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
+               RegisterOperand(key), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -538,16 +494,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
     Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
   size_t entry = GetConstantPoolEntry(shared_info);
-  DCHECK(FitsInImm8Operand(tenured));
-  if (FitsInIdx8Operand(entry)) {
-    Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
-           static_cast<uint8_t>(tenured));
-  } else if (FitsInIdx16Operand(entry)) {
-    Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
-           static_cast<uint8_t>(tenured));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(entry));
+  OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
+               UnsignedOperand(static_cast<size_t>(tenured)));
   return *this;
 }
 
@@ -565,73 +515,55 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
-    Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(pattern_entry)) {
-    Output(Bytecode::kCreateRegExpLiteralWide,
-           static_cast<uint16_t>(pattern_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(pattern_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
+               UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
+               UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  if (FitsInIdx8Operand(literal_index) &&
-      FitsInIdx8Operand(constant_elements_entry)) {
-    Output(Bytecode::kCreateArrayLiteral,
-           static_cast<uint8_t>(constant_elements_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(constant_elements_entry)) {
-    Output(Bytecode::kCreateArrayLiteralWide,
-           static_cast<uint16_t>(constant_elements_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(constant_elements_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
+               UnsignedOperand(constant_elements_entry),
+               UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
     Handle<FixedArray> constant_properties, int literal_index, int flags) {
-  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  if (FitsInIdx8Operand(literal_index) &&
-      FitsInIdx8Operand(constant_properties_entry)) {
-    Output(Bytecode::kCreateObjectLiteral,
-           static_cast<uint8_t>(constant_properties_entry),
-           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
-  } else if (FitsInIdx16Operand(literal_index) &&
-             FitsInIdx16Operand(constant_properties_entry)) {
-    Output(Bytecode::kCreateObjectLiteralWide,
-           static_cast<uint16_t>(constant_properties_entry),
-           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForUnsignedOperand(constant_properties_entry),
+      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
+               UnsignedOperand(constant_properties_entry),
+               UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  Output(Bytecode::kPushContext, context.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(context));
+  OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  Output(Bytecode::kPopContext, context.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(context));
+  OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
   return *this;
 }
 
@@ -649,7 +581,6 @@
     case Bytecode::kTestEqual:
     case Bytecode::kTestNotEqual:
     case Bytecode::kTestEqualStrict:
-    case Bytecode::kTestNotEqualStrict:
     case Bytecode::kTestLessThan:
     case Bytecode::kTestLessThanOrEqual:
     case Bytecode::kTestGreaterThan:
@@ -677,7 +608,6 @@
       case Bytecode::kToName:
       case Bytecode::kTypeOf:
         return *this;
-      case Bytecode::kLdaConstantWide:
       case Bytecode::kLdaConstant: {
         Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
         if (object->IsName()) return *this;
@@ -716,8 +646,12 @@
                                                  BytecodeLabel* label) {
   DCHECK(!label->is_bound());
   DCHECK(target.is_bound());
-  PatchJump(bytecodes()->begin() + target.offset(),
-            bytecodes()->begin() + label->offset());
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(bytecodes()->begin() + target.offset(),
+              bytecodes()->begin() + label->offset());
+    // Now treat as if the label will only be back referred to.
+  }
   label->bind_to(target.offset());
   LeaveBasicBlock();
   return *this;
@@ -746,38 +680,10 @@
       return Bytecode::kJumpIfUndefinedConstant;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
-    Bytecode jump_bytecode) {
-  switch (jump_bytecode) {
-    case Bytecode::kJump:
-      return Bytecode::kJumpConstantWide;
-    case Bytecode::kJumpIfTrue:
-      return Bytecode::kJumpIfTrueConstantWide;
-    case Bytecode::kJumpIfFalse:
-      return Bytecode::kJumpIfFalseConstantWide;
-    case Bytecode::kJumpIfToBooleanTrue:
-      return Bytecode::kJumpIfToBooleanTrueConstantWide;
-    case Bytecode::kJumpIfToBooleanFalse:
-      return Bytecode::kJumpIfToBooleanFalseConstantWide;
-    case Bytecode::kJumpIfNotHole:
-      return Bytecode::kJumpIfNotHoleConstantWide;
-    case Bytecode::kJumpIfNull:
-      return Bytecode::kJumpIfNullConstantWide;
-    case Bytecode::kJumpIfUndefined:
-      return Bytecode::kJumpIfUndefinedConstantWide;
-    default:
-      UNREACHABLE();
-      return static_cast<Bytecode>(-1);
-  }
-}
-
-
 // static
 Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
   switch (jump_bytecode) {
@@ -793,7 +699,7 @@
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -803,54 +709,88 @@
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
   DCHECK_EQ(*operand_location, 0);
-  if (FitsInImm8Operand(delta)) {
-    // The jump fits within the range of an Imm8 operand, so cancel
+  if (SizeForSignedOperand(delta) == OperandSize::kByte) {
+    // The jump fits within the range of an Imm operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
     *operand_location = static_cast<uint8_t>(delta);
   } else {
-    // The jump does not fit within the range of an Imm8 operand, so
+    // The jump does not fit within the range of an Imm operand, so
     // commit reservation putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
-    DCHECK(FitsInIdx8Operand(entry));
+    DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
     *jump_location = Bytecodes::ToByte(jump_bytecode);
     *operand_location = static_cast<uint8_t>(entry);
   }
 }
 
-
 void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
     const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
-  DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
-  size_t entry = constant_array_builder()->CommitReservedEntry(
-      OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
-  DCHECK(FitsInIdx16Operand(entry));
   uint8_t operand_bytes[2];
-  WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+  } else {
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    *jump_location = Bytecodes::ToByte(jump_bytecode);
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  }
   DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
   *operand_location++ = operand_bytes[0];
   *operand_location = operand_bytes[1];
 }
 
+void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
+    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+  DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
+  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+  uint8_t operand_bytes[4];
+  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+  DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
+         *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
+  *operand_location++ = operand_bytes[0];
+  *operand_location++ = operand_bytes[1];
+  *operand_location++ = operand_bytes[2];
+  *operand_location = operand_bytes[3];
+}
 
 void BytecodeArrayBuilder::PatchJump(
     const ZoneVector<uint8_t>::iterator& jump_target,
     const ZoneVector<uint8_t>::iterator& jump_location) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
   int delta = static_cast<int>(jump_target - jump_location);
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+    // If a prefix scaling bytecode is emitted the target offset is one
+    // less than the case of no prefix scaling bytecode.
+    delta -= 1;
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+    jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
+  }
+
   DCHECK(Bytecodes::IsJump(jump_bytecode));
-  switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
-    case OperandSize::kByte:
+  switch (operand_scale) {
+    case OperandScale::kSingle:
       PatchIndirectJumpWith8BitOperand(jump_location, delta);
       break;
-    case OperandSize::kShort:
-      PatchIndirectJumpWith16BitOperand(jump_location, delta);
+    case OperandScale::kDouble:
+      PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
       break;
-    case OperandSize::kNone:
+    case OperandScale::kQuadruple:
+      PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
+      break;
+    default:
       UNREACHABLE();
   }
   unbound_jumps_--;
@@ -860,10 +800,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
   // Don't emit dead code.
-  if (exit_seen_in_block_) {
-    source_position_table_builder_.RevertPosition(bytecodes()->size());
-    return *this;
-  }
+  if (exit_seen_in_block_) return *this;
 
   // Check if the value in accumulator is boolean, if not choose an
   // appropriate JumpIfToBoolean bytecode.
@@ -877,22 +814,14 @@
     CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
     size_t abs_delta = bytecodes()->size() - label->offset();
     int delta = -static_cast<int>(abs_delta);
-
-    if (FitsInImm8Operand(delta)) {
-      Output(jump_bytecode, static_cast<uint8_t>(delta));
-    } else {
-      size_t entry =
-          GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
-      if (FitsInIdx8Operand(entry)) {
-        Output(GetJumpWithConstantOperand(jump_bytecode),
-               static_cast<uint8_t>(entry));
-      } else if (FitsInIdx16Operand(entry)) {
-        Output(GetJumpWithConstantWideOperand(jump_bytecode),
-               static_cast<uint16_t>(entry));
-      } else {
-        UNREACHABLE();
-      }
+    OperandSize operand_size = SizeForSignedOperand(delta);
+    if (operand_size > OperandSize::kByte) {
+      // Adjust for scaling byte prefix for wide jump offset.
+      DCHECK_LE(delta, 0);
+      delta -= 1;
     }
+    OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
+                 SignedOperand(delta, operand_size));
   } else {
     // The label has not yet been bound so this is a forward reference
     // that will be patched when the label is bound. We create a
@@ -904,16 +833,7 @@
     unbound_jumps_++;
     OperandSize reserved_operand_size =
         constant_array_builder()->CreateReservedEntry();
-    switch (reserved_operand_size) {
-      case OperandSize::kByte:
-        Output(jump_bytecode, 0);
-        break;
-      case OperandSize::kShort:
-        Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
-        break;
-      case OperandSize::kNone:
-        UNREACHABLE();
-    }
+    OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
   }
   LeaveBasicBlock();
   return *this;
@@ -970,6 +890,7 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
+  SetReturnPosition();
   Output(Bytecode::kReturn);
   exit_seen_in_block_ = true;
   return *this;
@@ -982,44 +903,40 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register cache_info_triple) {
-  if (FitsInReg8Operand(cache_info_triple)) {
-    Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
-  } else if (FitsInReg16Operand(cache_info_triple)) {
-    Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
+  OutputScaled(Bytecode::kForInPrepare, operand_scale,
+               RegisterOperand(cache_info_triple));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  Output(Bytecode::kForInDone, index.ToRawOperand(),
-         cache_length.ToRawOperand());
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
+  OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
+               RegisterOperand(cache_length));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
-    Register receiver, Register index, Register cache_type_array_pair) {
-  if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
-      FitsInReg8Operand(cache_type_array_pair)) {
-    Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
-           cache_type_array_pair.ToRawOperand());
-  } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
-             FitsInReg16Operand(cache_type_array_pair)) {
-    Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
-           index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+    Register receiver, Register index, Register cache_type_array_pair,
+    int feedback_slot) {
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
+      SizeForRegisterOperand(cache_type_array_pair),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
+               RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  Output(Bytecode::kForInStep, index.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(index));
+  OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
   return *this;
 }
 
@@ -1051,12 +968,12 @@
   exit_seen_in_block_ = false;
 }
 
-void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
+void BytecodeArrayBuilder::EnsureReturn() {
   if (!exit_seen_in_block_) {
     LoadUndefined();
-    SetReturnPosition(literal);
     Return();
   }
+  DCHECK(exit_seen_in_block_);
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -1065,23 +982,14 @@
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
   Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
-      FitsInIdx8Operand(receiver_args_count) &&
-      FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
-           static_cast<uint8_t>(receiver_args_count),
-           static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInReg16Operand(callable) &&
-             FitsInReg16Operand(receiver_args) &&
-             FitsInIdx16Operand(receiver_args_count) &&
-             FitsInIdx16Operand(feedback_slot)) {
-    bytecode = BytecodeForWideOperands(bytecode);
-    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
-           static_cast<uint16_t>(receiver_args_count),
-           static_cast<uint16_t>(feedback_slot));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
+      SizeForUnsignedOperand(receiver_args_count),
+      SizeForUnsignedOperand(feedback_slot));
+  OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
+               RegisterOperand(receiver_args),
+               UnsignedOperand(receiver_args_count),
+               UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -1092,17 +1000,11 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
-      FitsInIdx8Operand(arg_count)) {
-    Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
-           static_cast<uint8_t>(arg_count));
-  } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
-             FitsInIdx16Operand(arg_count)) {
-    Output(Bytecode::kNewWide, constructor.ToRawOperand(),
-           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
+      SizeForUnsignedOperand(arg_count));
+  OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
+               RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
@@ -1110,20 +1012,18 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
-    Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
-  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
-    Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
+                          ? Bytecode::kInvokeIntrinsic
+                          : Bytecode::kCallRuntime;
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
+  OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
+               RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
@@ -1132,180 +1032,145 @@
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
     Register first_return) {
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
-      FitsInReg8Operand(first_return)) {
-    Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
-           first_return.ToRawOperand());
-  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
-             FitsInReg16Operand(first_return)) {
-    Output(Bytecode::kCallRuntimeForPairWide,
-           static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
-           static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale = OperandSizesToScale(
+      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
+      SizeForRegisterOperand(first_return));
+  OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
+               static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
+               UnsignedOperand(arg_count), RegisterOperand(first_return));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
     int context_index, Register receiver_args, size_t receiver_args_count) {
-  DCHECK(FitsInIdx16Operand(context_index));
-  if (FitsInReg8Operand(receiver_args) &&
-      FitsInIdx8Operand(receiver_args_count)) {
-    Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
-           receiver_args.ToRawOperand(),
-           static_cast<uint8_t>(receiver_args_count));
-  } else if (FitsInReg16Operand(receiver_args) &&
-             FitsInIdx16Operand(receiver_args_count)) {
-    Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
-           receiver_args.ToRawOperand(),
-           static_cast<uint16_t>(receiver_args_count));
-  } else {
-    UNIMPLEMENTED();
-  }
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForUnsignedOperand(context_index),
+                          SizeForRegisterOperand(receiver_args),
+                          SizeForUnsignedOperand(receiver_args_count));
+  OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
+               UnsignedOperand(context_index), RegisterOperand(receiver_args),
+               UnsignedOperand(receiver_args_count));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  Output(BytecodeForDelete(language_mode), object.ToRawOperand());
+  OperandScale operand_scale =
+      OperandSizesToScale(SizeForRegisterOperand(object));
+  OutputScaled(BytecodeForDelete(language_mode), operand_scale,
+               RegisterOperand(object));
   return *this;
 }
 
-
 size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
   return constant_array_builder()->Insert(object);
 }
 
-void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
-  int pos = std::max(fun->start_position(), fun->end_position() - 1);
-  source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
+void BytecodeArrayBuilder::SetReturnPosition() {
+  if (return_position_ == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      return_position_);
 }
 
 void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
   source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
                                                       stmt->position());
 }
 
 void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
   source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
                                                        expr->position());
 }
 
+void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
+  if (expr->position() == RelocInfo::kNoPosition) return;
+  if (exit_seen_in_block_) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      expr->position());
+}
+
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
   return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
+bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
+                                          OperandScale operand_scale,
+                                          int operand_index,
                                           uint32_t operand_value) const {
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
   OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
   switch (operand_type) {
     case OperandType::kNone:
       return false;
-    case OperandType::kRegCount16: {
-      // Expect kRegCount16 is part of a range previous operand is a
-      // valid operand to start a range.
+    case OperandType::kRegCount: {
       if (operand_index > 0) {
         OperandType previous_operand_type =
             Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        return ((previous_operand_type == OperandType::kMaybeReg16 ||
-                 previous_operand_type == OperandType::kReg16) &&
-                static_cast<uint16_t>(operand_value) == operand_value);
-      } else {
-        return false;
+        if (previous_operand_type != OperandType::kMaybeReg &&
+            previous_operand_type != OperandType::kReg) {
+          return false;
+        }
       }
+    }  // Fall-through
+    case OperandType::kFlag8:
+    case OperandType::kIdx:
+    case OperandType::kRuntimeId:
+    case OperandType::kImm: {
+      size_t unsigned_value = static_cast<size_t>(operand_value);
+      return SizeForUnsignedOperand(unsigned_value) <= operand_size;
     }
-    case OperandType::kRegCount8: {
-      // Expect kRegCount8 is part of a range previous operand is a
-      // valid operand to start a range.
-      if (operand_index > 0) {
-        OperandType previous_operand_type =
-            Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        return ((previous_operand_type == OperandType::kMaybeReg8 ||
-                 previous_operand_type == OperandType::kReg8 ||
-                 previous_operand_type == OperandType::kMaybeReg16) &&
-                static_cast<uint8_t>(operand_value) == operand_value);
-      } else {
-        return false;
-      }
-    }
-    case OperandType::kIdx16:
-      return static_cast<uint16_t>(operand_value) == operand_value;
-    case OperandType::kImm8:
-    case OperandType::kIdx8:
-      return static_cast<uint8_t>(operand_value) == operand_value;
-    case OperandType::kMaybeReg8:
+    case OperandType::kMaybeReg:
       if (operand_value == 0) {
         return true;
       }
-    // Fall-through to kReg8 case.
-    case OperandType::kReg8:
-    case OperandType::kRegOut8:
-      return RegisterIsValid(Register::FromRawOperand(operand_value),
-                             operand_type);
-    case OperandType::kRegOutPair8:
-    case OperandType::kRegOutPair16:
-    case OperandType::kRegPair8:
-    case OperandType::kRegPair16: {
-      Register reg0 = Register::FromRawOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      return RegisterIsValid(reg0, operand_type) &&
-             RegisterIsValid(reg1, operand_type);
+    // Fall-through to kReg case.
+    case OperandType::kReg:
+    case OperandType::kRegOut: {
+      Register reg = RegisterFromOperand(operand_value);
+      return RegisterIsValid(reg, operand_size);
     }
-    case OperandType::kRegOutTriple8:
-    case OperandType::kRegOutTriple16: {
-      Register reg0 = Register::FromRawOperand(operand_value);
+    case OperandType::kRegOutPair:
+    case OperandType::kRegPair: {
+      Register reg0 = RegisterFromOperand(operand_value);
+      Register reg1 = Register(reg0.index() + 1);
+      // The size of reg1 is immaterial.
+      return RegisterIsValid(reg0, operand_size) &&
+             RegisterIsValid(reg1, OperandSize::kQuad);
+    }
+    case OperandType::kRegOutTriple: {
+      Register reg0 = RegisterFromOperand(operand_value);
       Register reg1 = Register(reg0.index() + 1);
       Register reg2 = Register(reg0.index() + 2);
-      return RegisterIsValid(reg0, operand_type) &&
-             RegisterIsValid(reg1, operand_type) &&
-             RegisterIsValid(reg2, operand_type);
-    }
-    case OperandType::kMaybeReg16:
-      if (operand_value == 0) {
-        return true;
-      }
-    // Fall-through to kReg16 case.
-    case OperandType::kReg16:
-    case OperandType::kRegOut16: {
-      Register reg = Register::FromRawOperand(operand_value);
-      return RegisterIsValid(reg, operand_type);
+      // The size of reg1 and reg2 is immaterial.
+      return RegisterIsValid(reg0, operand_size) &&
+             RegisterIsValid(reg1, OperandSize::kQuad) &&
+             RegisterIsValid(reg2, OperandSize::kQuad);
     }
   }
   UNREACHABLE();
   return false;
 }
 
-
 bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
-                                           OperandType reg_type) const {
+                                           OperandSize reg_size) const {
   if (!reg.is_valid()) {
     return false;
   }
 
-  switch (Bytecodes::SizeOfOperand(reg_type)) {
-    case OperandSize::kByte:
-      if (!FitsInReg8OperandUntranslated(reg)) {
-        return false;
-      }
-      break;
-    case OperandSize::kShort:
-      if (!FitsInReg16OperandUntranslated(reg)) {
-        return false;
-      }
-      break;
-    case OperandSize::kNone:
-      UNREACHABLE();
-      return false;
+  if (SizeForRegisterOperand(reg) > reg_size) {
+    return false;
   }
 
   if (reg.is_current_context() || reg.is_function_closure() ||
@@ -1314,15 +1179,10 @@
   } else if (reg.is_parameter()) {
     int parameter_index = reg.ToParameterIndex(parameter_count());
     return parameter_index >= 0 && parameter_index < parameter_count();
-  } else if (RegisterTranslator::InTranslationWindow(reg)) {
-    return translation_register_count() > 0;
+  } else if (reg.index() < fixed_register_count()) {
+    return true;
   } else {
-    reg = RegisterTranslator::UntranslateRegister(reg);
-    if (reg.index() < fixed_register_count()) {
-      return true;
-    } else {
-      return TemporaryRegisterIsLive(reg);
-    }
+    return TemporaryRegisterIsLive(reg);
   }
 }
 
@@ -1338,9 +1198,7 @@
     PreviousBytecodeHelper previous_bytecode(*this);
     Bytecode bytecode = previous_bytecode.GetBytecode();
     if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
-      Register previous_reg =
-          Register::FromOperand(previous_bytecode.GetOperand(0));
-      return previous_reg == reg;
+      return previous_bytecode.GetRegisterOperand(0) == reg;
     }
   }
   return false;
@@ -1374,7 +1232,7 @@
       return Bytecode::kShiftRightLogical;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1388,7 +1246,7 @@
       return Bytecode::kDec;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1402,8 +1260,6 @@
       return Bytecode::kTestNotEqual;
     case Token::Value::EQ_STRICT:
       return Bytecode::kTestEqualStrict;
-    case Token::Value::NE_STRICT:
-      return Bytecode::kTestNotEqualStrict;
     case Token::Value::LT:
       return Bytecode::kTestLessThan;
     case Token::Value::GT:
@@ -1418,49 +1274,7 @@
       return Bytecode::kTestIn;
     default:
       UNREACHABLE();
-      return static_cast<Bytecode>(-1);
-  }
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
-  switch (bytecode) {
-    case Bytecode::kCall:
-      return Bytecode::kCallWide;
-    case Bytecode::kTailCall:
-      return Bytecode::kTailCallWide;
-    case Bytecode::kLoadIC:
-      return Bytecode::kLoadICWide;
-    case Bytecode::kKeyedLoadIC:
-      return Bytecode::kKeyedLoadICWide;
-    case Bytecode::kStoreICSloppy:
-      return Bytecode::kStoreICSloppyWide;
-    case Bytecode::kStoreICStrict:
-      return Bytecode::kStoreICStrictWide;
-    case Bytecode::kKeyedStoreICSloppy:
-      return Bytecode::kKeyedStoreICSloppyWide;
-    case Bytecode::kKeyedStoreICStrict:
-      return Bytecode::kKeyedStoreICStrictWide;
-    case Bytecode::kLdaGlobal:
-      return Bytecode::kLdaGlobalWide;
-    case Bytecode::kLdaGlobalInsideTypeof:
-      return Bytecode::kLdaGlobalInsideTypeofWide;
-    case Bytecode::kStaGlobalSloppy:
-      return Bytecode::kStaGlobalSloppyWide;
-    case Bytecode::kStaGlobalStrict:
-      return Bytecode::kStaGlobalStrictWide;
-    case Bytecode::kLdaLookupSlot:
-      return Bytecode::kLdaLookupSlotWide;
-    case Bytecode::kLdaLookupSlotInsideTypeof:
-      return Bytecode::kLdaLookupSlotInsideTypeofWide;
-    case Bytecode::kStaLookupSlotStrict:
-      return Bytecode::kStaLookupSlotStrictWide;
-    case Bytecode::kStaLookupSlotSloppy:
-      return Bytecode::kStaLookupSlotSloppyWide;
-    default:
-      UNREACHABLE();
-      return static_cast<Bytecode>(-1);
+      return Bytecode::kIllegal;
   }
 }
 
@@ -1472,12 +1286,10 @@
       return Bytecode::kStoreICSloppy;
     case STRICT:
       return Bytecode::kStoreICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1489,12 +1301,10 @@
       return Bytecode::kKeyedStoreICSloppy;
     case STRICT:
       return Bytecode::kKeyedStoreICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1513,12 +1323,10 @@
       return Bytecode::kStaGlobalSloppy;
     case STRICT:
       return Bytecode::kStaGlobalStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1530,12 +1338,10 @@
       return Bytecode::kStaLookupSlotSloppy;
     case STRICT:
       return Bytecode::kStaLookupSlotStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
@@ -1550,7 +1356,7 @@
       return Bytecode::kCreateRestParameter;
   }
   UNREACHABLE();
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 
@@ -1561,12 +1367,10 @@
       return Bytecode::kDeletePropertySloppy;
     case STRICT:
       return Bytecode::kDeletePropertyStrict;
-    case STRONG:
-      UNIMPLEMENTED();
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
@@ -1579,58 +1383,109 @@
     default:
       UNREACHABLE();
   }
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
-  return kMinUInt8 <= value && value <= kMaxUInt8;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
-  return value <= static_cast<size_t>(kMaxUInt8);
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
-  return kMinInt8 <= value && value <= kMaxInt8;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
-  return kMinUInt16 <= value && value <= kMaxUInt16;
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
-  return value <= static_cast<size_t>(kMaxUInt16);
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
-  return RegisterTranslator::FitsInReg8Operand(value);
+OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
+  if (value.is_byte_operand()) {
+    return OperandSize::kByte;
+  } else if (value.is_short_operand()) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
-  return value.is_byte_operand();
-}
-
-
-// static
-bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
-  return RegisterTranslator::FitsInReg16Operand(value);
+OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
+  if (kMinInt8 <= value && value <= kMaxInt8) {
+    return OperandSize::kByte;
+  } else if (kMinInt16 <= value && value <= kMaxInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
 }
 
 // static
-bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
-  return value.is_short_operand();
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
+  DCHECK_GE(value, 0);
+  if (value <= kMaxUInt8) {
+    return OperandSize::kByte;
+  } else if (value <= kMaxUInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
+}
+
+OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
+  if (value <= static_cast<size_t>(kMaxUInt8)) {
+    return OperandSize::kByte;
+  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
+    return OperandSize::kShort;
+  } else if (value <= kMaxUInt32) {
+    return OperandSize::kQuad;
+  } else {
+    UNREACHABLE();
+    return OperandSize::kQuad;
+  }
+}
+
+OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
+                                                       OperandSize size1,
+                                                       OperandSize size2,
+                                                       OperandSize size3) {
+  OperandSize upper = std::max(size0, size1);
+  OperandSize lower = std::max(size2, size3);
+  OperandSize result = std::max(upper, lower);
+  // Operand sizes have been scaled before calling this function.
+  // Currently all scalable operands are byte sized at
+  // OperandScale::kSingle.
+  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+                    static_cast<int>(OperandScale::kSingle) &&
+                static_cast<int>(OperandSize::kShort) ==
+                    static_cast<int>(OperandScale::kDouble) &&
+                static_cast<int>(OperandSize::kQuad) ==
+                    static_cast<int>(OperandScale::kQuadruple));
+  OperandScale operand_scale = static_cast<OperandScale>(result);
+  DCHECK(operand_scale == OperandScale::kSingle ||
+         operand_scale == OperandScale::kDouble ||
+         operand_scale == OperandScale::kQuadruple);
+  return operand_scale;
+}
+
+uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
+  return static_cast<uint32_t>(reg.ToOperand());
+}
+
+Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
+  return Register::FromOperand(static_cast<int32_t>(operand));
+}
+
+uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
+  switch (size) {
+    case OperandSize::kByte:
+      return static_cast<uint8_t>(value & 0xff);
+    case OperandSize::kShort:
+      return static_cast<uint16_t>(value & 0xffff);
+    case OperandSize::kQuad:
+      return static_cast<uint32_t>(value);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
+
+uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
+  DCHECK_GE(value, 0);
+  return static_cast<uint32_t>(value);
+}
+
+uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
+  DCHECK_LE(value, kMaxUInt32);
+  return static_cast<uint32_t>(value);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index fe69337..4446a63 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -10,7 +10,6 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/register-translator.h"
 #include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
@@ -24,10 +23,11 @@
 class BytecodeLabel;
 class Register;
 
-class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
+class BytecodeArrayBuilder final : public ZoneObject {
  public:
   BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
-                       int context_count, int locals_count);
+                       int context_count, int locals_count,
+                       FunctionLiteral* literal = nullptr);
   ~BytecodeArrayBuilder();
 
   Handle<BytecodeArray> ToBytecodeArray();
@@ -65,13 +65,6 @@
     return temporary_register_allocator()->allocation_count();
   }
 
-  // Returns the number of registers used for translating wide
-  // register operands into byte sized register operands.
-  int translation_register_count() const {
-    return RegisterTranslator::RegisterCountAdjustment(
-        fixed_and_temporary_register_count(), parameter_count());
-  }
-
   Register Parameter(int parameter_index) const;
 
   // Return true if the register |reg| represents a parameter or a
@@ -89,7 +82,6 @@
   BytecodeArrayBuilder& LoadTheHole();
   BytecodeArrayBuilder& LoadTrue();
   BytecodeArrayBuilder& LoadFalse();
-  BytecodeArrayBuilder& LoadBooleanConstant(bool value);
 
   // Global loads to the accumulator and stores from the accumulator.
   BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
@@ -245,7 +237,8 @@
   BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
   BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
   BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
-                                  Register cache_type_array_pair);
+                                  Register cache_type_array_pair,
+                                  int feedback_slot);
   BytecodeArrayBuilder& ForInStep(Register index);
 
   // Exception handling.
@@ -257,8 +250,11 @@
   // entry, so that it can be referenced by above exception handling support.
   int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
 
+  void InitializeReturnPosition(FunctionLiteral* literal);
+
   void SetStatementPosition(Statement* stmt);
   void SetExpressionPosition(Expression* expr);
+  void SetExpressionAsStatementPosition(Expression* expr);
 
   // Accessors
   Zone* zone() const { return zone_; }
@@ -269,7 +265,23 @@
     return &temporary_allocator_;
   }
 
-  void EnsureReturn(FunctionLiteral* literal);
+  void EnsureReturn();
+
+  static OperandScale OperandSizesToScale(
+      OperandSize size0, OperandSize size1 = OperandSize::kByte,
+      OperandSize size2 = OperandSize::kByte,
+      OperandSize size3 = OperandSize::kByte);
+
+  static OperandSize SizeForRegisterOperand(Register reg);
+  static OperandSize SizeForSignedOperand(int value);
+  static OperandSize SizeForUnsignedOperand(int value);
+  static OperandSize SizeForUnsignedOperand(size_t value);
+
+  static uint32_t RegisterOperand(Register reg);
+  static Register RegisterFromOperand(uint32_t operand);
+  static uint32_t SignedOperand(int value, OperandSize size);
+  static uint32_t UnsignedOperand(int value);
+  static uint32_t UnsignedOperand(size_t value);
 
  private:
   class PreviousBytecodeHelper;
@@ -278,7 +290,6 @@
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
-  static Bytecode BytecodeForWideOperands(Bytecode bytecode);
   static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
@@ -288,32 +299,22 @@
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
   static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
-  static bool FitsInIdx8Operand(int value);
-  static bool FitsInIdx8Operand(size_t value);
-  static bool FitsInImm8Operand(int value);
-  static bool FitsInIdx16Operand(int value);
-  static bool FitsInIdx16Operand(size_t value);
-  static bool FitsInReg8Operand(Register value);
-  static bool FitsInReg8OperandUntranslated(Register value);
-  static bool FitsInReg16Operand(Register value);
-  static bool FitsInReg16OperandUntranslated(Register value);
-
-  // RegisterMover interface.
-  void MoveRegisterUntranslated(Register from, Register to) override;
-
   static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
-  static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
 
   template <size_t N>
-  INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2, uint32_t operand3);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-              uint32_t operand2);
-  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
-  void Output(Bytecode bytecode, uint32_t operand0);
+  INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
+                     OperandScale operand_scale = OperandScale::kSingle));
   void Output(Bytecode bytecode);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1, uint32_t operand2,
+                    uint32_t operand3);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1, uint32_t operand2);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0, uint32_t operand1);
+  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
+                    uint32_t operand0);
 
   BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
                                    BytecodeLabel* label);
@@ -323,19 +324,21 @@
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
   void PatchIndirectJumpWith16BitOperand(
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+  void PatchIndirectJumpWith32BitOperand(
+      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
 
   void LeaveBasicBlock();
 
-  bool OperandIsValid(Bytecode bytecode, int operand_index,
-                      uint32_t operand_value) const;
-  bool RegisterIsValid(Register reg, OperandType reg_type) const;
+  bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
+                      int operand_index, uint32_t operand_value) const;
+  bool RegisterIsValid(Register reg, OperandSize reg_size) const;
 
   bool LastBytecodeInSameBlock() const;
   bool NeedToBooleanCast();
   bool IsRegisterInAccumulator(Register reg);
 
-  // Set position for implicit return.
-  void SetReturnPosition(FunctionLiteral* fun);
+  // Set position for return.
+  void SetReturnPosition();
 
   // Gets a constant pool entry for the |object|.
   size_t GetConstantPoolEntry(Handle<Object> object);
@@ -355,7 +358,6 @@
   SourcePositionTableBuilder* source_position_table_builder() {
     return &source_position_table_builder_;
   }
-  RegisterTranslator* register_translator() { return &register_translator_; }
 
   Isolate* isolate_;
   Zone* zone_;
@@ -371,8 +373,8 @@
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
+  int return_position_;
   TemporaryRegisterAllocator temporary_allocator_;
-  RegisterTranslator register_translator_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index 0fea985..a17efcb 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -12,103 +12,119 @@
 
 BytecodeArrayIterator::BytecodeArrayIterator(
     Handle<BytecodeArray> bytecode_array)
-    : bytecode_array_(bytecode_array), bytecode_offset_(0) {}
-
-
-void BytecodeArrayIterator::Advance() {
-  bytecode_offset_ += Bytecodes::Size(current_bytecode());
+    : bytecode_array_(bytecode_array),
+      bytecode_offset_(0),
+      operand_scale_(OperandScale::kSingle),
+      prefix_offset_(0) {
+  UpdateOperandScale();
 }
 
+void BytecodeArrayIterator::Advance() {
+  bytecode_offset_ += current_bytecode_size();
+  UpdateOperandScale();
+}
+
+void BytecodeArrayIterator::UpdateOperandScale() {
+  if (!done()) {
+    uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+    Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+    if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+      operand_scale_ =
+          Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+      prefix_offset_ = 1;
+    } else {
+      operand_scale_ = OperandScale::kSingle;
+      prefix_offset_ = 0;
+    }
+  }
+}
 
 bool BytecodeArrayIterator::done() const {
   return bytecode_offset_ >= bytecode_array()->length();
 }
 
-
 Bytecode BytecodeArrayIterator::current_bytecode() const {
   DCHECK(!done());
-  uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
-  return interpreter::Bytecodes::FromByte(current_byte);
+  uint8_t current_byte =
+      bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+  Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+  DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+  return current_bytecode;
 }
 
-
 int BytecodeArrayIterator::current_bytecode_size() const {
-  return Bytecodes::Size(current_bytecode());
+  return current_prefix_offset() +
+         Bytecodes::Size(current_bytecode(), current_operand_scale());
 }
 
-
-uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
-                                              OperandType operand_type) const {
+uint32_t BytecodeArrayIterator::GetUnsignedOperand(
+    int operand_index, OperandType operand_type) const {
   DCHECK_GE(operand_index, 0);
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
   DCHECK_EQ(operand_type,
             Bytecodes::GetOperandType(current_bytecode(), operand_index));
-  uint8_t* operand_start =
+  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
       bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
-      Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
-  switch (Bytecodes::SizeOfOperand(operand_type)) {
-    case OperandSize::kByte:
-      return static_cast<uint32_t>(*operand_start);
-    case OperandSize::kShort:
-      return ReadUnalignedUInt16(operand_start);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return 0;
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
+                                          current_operand_scale());
 }
 
-
-int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
-  uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
-  return static_cast<int8_t>(operand);
+int32_t BytecodeArrayIterator::GetSignedOperand(
+    int operand_index, OperandType operand_type) const {
+  DCHECK_GE(operand_index, 0);
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  DCHECK_EQ(operand_type,
+            Bytecodes::GetOperandType(current_bytecode(), operand_index));
+  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
+                                        current_operand_scale());
 }
 
-int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
-  OperandSize size =
-      Bytecodes::GetOperandSize(current_bytecode(), operand_index);
-  OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
-                                                  : OperandType::kRegCount16;
-  uint32_t operand = GetRawOperand(operand_index, type);
-  return static_cast<int>(operand);
+uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kFlag8);
+  return GetUnsignedOperand(operand_index, OperandType::kFlag8);
 }
 
+int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kImm);
+  return GetSignedOperand(operand_index, OperandType::kImm);
+}
 
-int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
+    int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kRegCount);
+  return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kIdx8 ||
-         operand_type == OperandType::kIdx16);
-  uint32_t operand = GetRawOperand(operand_index, operand_type);
-  return static_cast<int>(operand);
+  DCHECK_EQ(operand_type, OperandType::kIdx);
+  return GetUnsignedOperand(operand_index, operand_type);
 }
 
-
 Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
-  uint32_t operand = GetRawOperand(operand_index, operand_type);
-  Register reg;
-  switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
-    case OperandSize::kByte:
-      reg = Register::FromOperand(static_cast<uint8_t>(operand));
-      break;
-    case OperandSize::kShort:
-      reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
-      break;
-    case OperandSize::kNone:
-      UNREACHABLE();
-      reg = Register::invalid_value();
-      break;
-  }
-  DCHECK_GE(reg.index(),
-            Register::FromParameterIndex(0, bytecode_array()->parameter_count())
-                .index());
-  DCHECK(reg.index() < bytecode_array()->register_count() ||
-         (reg.index() == 0 &&
-          Bytecodes::IsMaybeRegisterOperandType(
-              Bytecodes::GetOperandType(current_bytecode(), operand_index))));
-  return reg;
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return Bytecodes::DecodeRegisterOperand(operand_start, operand_type,
+                                          current_operand_scale());
 }
 
 int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
@@ -116,20 +132,17 @@
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
   DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
   switch (operand_type) {
-    case OperandType::kRegPair8:
-    case OperandType::kRegPair16:
-    case OperandType::kRegOutPair8:
-    case OperandType::kRegOutPair16:
+    case OperandType::kRegPair:
+    case OperandType::kRegOutPair:
       return 2;
-    case OperandType::kRegOutTriple8:
-    case OperandType::kRegOutTriple16:
+    case OperandType::kRegOutTriple:
       return 3;
     default: {
       if (operand_index + 1 !=
           Bytecodes::NumberOfOperands(current_bytecode())) {
         OperandType next_operand_type =
             Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
-        if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+        if (OperandType::kRegCount == next_operand_type) {
           return GetRegisterCountOperand(operand_index + 1);
         }
       }
@@ -138,6 +151,13 @@
   }
 }
 
+uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kRuntimeId);
+  return GetUnsignedOperand(operand_index, operand_type);
+}
+
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
     int operand_index) const {
   return FixedArray::get(bytecode_array()->constant_pool(),
@@ -150,11 +170,10 @@
   Bytecode bytecode = current_bytecode();
   if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
     int relative_offset = GetImmediateOperand(0);
-    return current_offset() + relative_offset;
-  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
-             interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+    return current_offset() + relative_offset + current_prefix_offset();
+  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
     Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
-    return current_offset() + smi->value();
+    return current_offset() + smi->value() + current_prefix_offset();
   } else {
     UNREACHABLE();
     return kMinInt;
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 5379bbf..b372894 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -21,31 +21,38 @@
   bool done() const;
   Bytecode current_bytecode() const;
   int current_bytecode_size() const;
-  void set_current_offset(int offset) { bytecode_offset_ = offset; }
   int current_offset() const { return bytecode_offset_; }
+  OperandScale current_operand_scale() const { return operand_scale_; }
+  int current_prefix_offset() const { return prefix_offset_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
   }
 
-  int8_t GetImmediateOperand(int operand_index) const;
-  int GetIndexOperand(int operand_index) const;
-  int GetRegisterCountOperand(int operand_index) const;
+  uint32_t GetFlagOperand(int operand_index) const;
+  int32_t GetImmediateOperand(int operand_index) const;
+  uint32_t GetIndexOperand(int operand_index) const;
+  uint32_t GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
   int GetRegisterOperandRange(int operand_index) const;
+  uint32_t GetRuntimeIdOperand(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
-  // Get the raw byte for the given operand. Note: you should prefer using the
-  // typed versions above which cast the return to an appropriate type.
-  uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
-
   // Returns the absolute offset of the branch target at the current
   // bytecode. It is an error to call this method if the bytecode is
   // not for a jump or conditional jump.
   int GetJumpTargetOffset() const;
 
  private:
+  uint32_t GetUnsignedOperand(int operand_index,
+                              OperandType operand_type) const;
+  int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+  void UpdateOperandScale();
+
   Handle<BytecodeArray> bytecode_array_;
   int bytecode_offset_;
+  OperandScale operand_scale_;
+  int prefix_offset_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
 };
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 6f4dc27..b0fa245 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -72,6 +72,7 @@
 
   Scope* scope() const { return scope_; }
   Register reg() const { return register_; }
+  bool ShouldPopContext() { return should_pop_context_; }
 
  private:
   const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
@@ -212,9 +213,9 @@
  protected:
   bool Execute(Command command, Statement* statement) override {
     switch (command) {
-      case CMD_BREAK:
+      case CMD_BREAK:  // We should never see break/continue in top-level.
       case CMD_CONTINUE:
-        break;
+        UNREACHABLE();
       case CMD_RETURN:
         generator()->builder()->Return();
         return true;
@@ -362,15 +363,20 @@
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
-  ContextScope* context = this->context();
+  ContextScope* context = generator()->execution_context();
+  // Pop context to the expected depth but do not pop the outermost context.
+  if (context != current->context() && context->ShouldPopContext()) {
+    generator()->builder()->PopContext(current->context()->reg());
+  }
   do {
-    if (current->Execute(command, statement)) { return; }
+    if (current->Execute(command, statement)) {
+      return;
+    }
     current = current->outer();
     if (current->context() != context) {
       // Pop context to the expected depth.
       // TODO(rmcilroy): Only emit a single context pop.
       generator()->builder()->PopContext(current->context()->reg());
-      context = current->context();
     }
   } while (current != nullptr);
   UNREACHABLE();
@@ -450,7 +456,7 @@
 
   virtual ~ExpressionResultScope() {
     generator_->set_execution_result(outer_);
-    DCHECK(result_identified());
+    DCHECK(result_identified() || generator_->HasStackOverflow());
   }
 
   bool IsEffect() const { return kind_ == Expression::kEffect; }
@@ -462,6 +468,7 @@
  protected:
   ExpressionResultScope* outer() const { return outer_; }
   BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+  BytecodeGenerator* generator() const { return generator_; }
   const RegisterAllocationScope* allocator() const { return &allocator_; }
 
   void set_result_identified() {
@@ -536,7 +543,12 @@
     set_result_identified();
   }
 
-  Register ResultRegister() const { return result_register_; }
+  Register ResultRegister() {
+    if (generator()->HasStackOverflow() && !result_identified()) {
+      SetResultInAccumulator();
+    }
+    return result_register_;
+  }
 
  private:
   Register result_register_;
@@ -565,7 +577,8 @@
   // Initialize bytecode array builder.
   set_builder(new (zone()) BytecodeArrayBuilder(
       isolate(), zone(), info->num_parameters_including_this(),
-      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
+      info->literal()));
 
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
@@ -584,7 +597,7 @@
     MakeBytecodeBody();
   }
 
-  builder()->EnsureReturn(info->literal());
+  builder()->EnsureReturn();
   set_scope(nullptr);
   set_info(nullptr);
   return builder()->ToBytecodeArray();
@@ -611,12 +624,6 @@
     UNIMPLEMENTED();
   }
 
-  // Visit illegal re-declaration and bail out if it exists.
-  if (scope()->HasIllegalRedeclaration()) {
-    VisitForEffect(scope()->GetIllegalRedeclaration());
-    return;
-  }
-
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
 
@@ -826,6 +833,7 @@
 
 
 void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   BytecodeLabel else_label, end_label;
   if (stmt->condition()->ToBooleanIsTrue()) {
     // Generate then block unconditionally as always true.
@@ -861,23 +869,26 @@
 
 
 void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   execution_control()->Continue(stmt->target());
 }
 
 
 void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   execution_control()->Break(stmt->target());
 }
 
 
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  VisitForAccumulatorValue(stmt->expression());
   builder()->SetStatementPosition(stmt);
+  VisitForAccumulatorValue(stmt->expression());
   execution_control()->ReturnAccumulator();
 }
 
 
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
   builder()->CastAccumulatorToJSObject();
   VisitNewLocalWithContext();
@@ -893,6 +904,8 @@
   ControlScopeForBreakable scope(this, stmt, &switch_builder);
   int default_index = -1;
 
+  builder()->SetStatementPosition(stmt);
+
   // Keep the switch value in a register until a case matches.
   Register tag = VisitForRegisterValue(stmt->tag());
 
@@ -959,6 +972,7 @@
   } else {
     VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.JumpToHeaderIfTrue();
   }
@@ -975,6 +989,7 @@
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (!stmt->cond()->ToBooleanIsTrue()) {
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
@@ -998,12 +1013,14 @@
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+    builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
   VisitIterationBody(stmt, &loop_builder);
   if (stmt->next() != nullptr) {
     loop_builder.Next();
+    builder()->SetStatementPosition(stmt->next());
     Visit(stmt->next());
   }
   loop_builder.JumpToHeader();
@@ -1087,28 +1104,28 @@
 
 void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   if (stmt->subject()->IsNullLiteral() ||
-      stmt->subject()->IsUndefinedLiteral(isolate())) {
+      stmt->subject()->IsUndefinedLiteral()) {
     // ForIn generates lots of code, skip if it wouldn't produce any effects.
     return;
   }
 
   LoopBuilder loop_builder(builder());
-  BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
+  BytecodeLabel subject_null_label, subject_undefined_label;
 
   // Prepare the state for executing ForIn.
+  builder()->SetExpressionAsStatementPosition(stmt->subject());
   VisitForAccumulatorValue(stmt->subject());
   builder()->JumpIfUndefined(&subject_undefined_label);
   builder()->JumpIfNull(&subject_null_label);
   Register receiver = register_allocator()->NewRegister();
   builder()->CastAccumulatorToJSObject();
-  builder()->JumpIfNull(&not_object_label);
   builder()->StoreAccumulatorInRegister(receiver);
 
   register_allocator()->PrepareForConsecutiveAllocations(3);
   Register cache_type = register_allocator()->NextConsecutiveRegister();
   Register cache_array = register_allocator()->NextConsecutiveRegister();
   Register cache_length = register_allocator()->NextConsecutiveRegister();
-  // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+  // Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
   USE(cache_array);
   builder()->ForInPrepare(cache_type);
 
@@ -1119,11 +1136,13 @@
 
   // The loop
   loop_builder.LoopHeader();
+  builder()->SetExpressionAsStatementPosition(stmt->each());
   loop_builder.Condition();
   builder()->ForInDone(index, cache_length);
   loop_builder.BreakIfTrue();
   DCHECK(Register::AreContiguous(cache_type, cache_array));
-  builder()->ForInNext(receiver, index, cache_type);
+  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  builder()->ForInNext(receiver, index, cache_type, feedback_index(slot));
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
   VisitIterationBody(stmt, &loop_builder);
@@ -1132,7 +1151,6 @@
   builder()->StoreAccumulatorInRegister(index);
   loop_builder.JumpToHeader();
   loop_builder.EndLoop();
-  builder()->Bind(&not_object_label);
   builder()->Bind(&subject_null_label);
   builder()->Bind(&subject_undefined_label);
 }
@@ -1146,6 +1164,7 @@
 
   loop_builder.LoopHeader();
   loop_builder.Next();
+  builder()->SetExpressionAsStatementPosition(stmt->next_result());
   VisitForEffect(stmt->next_result());
   VisitForAccumulatorValue(stmt->result_done());
   loop_builder.BreakIfTrue();
@@ -1180,8 +1199,10 @@
   VisitNewLocalCatchContext(stmt->variable());
   builder()->StoreAccumulatorInRegister(context);
 
-  // Clear message object as we enter the catch block.
-  builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+  // If requested, clear message object as we enter the catch block.
+  if (stmt->clear_pending_message()) {
+    builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+  }
 
   // Load the catch context into the accumulator.
   builder()->LoadAccumulatorWithRegister(context);
@@ -1267,7 +1288,9 @@
   // Find or build a shared function info.
   Handle<SharedFunctionInfo> shared_info =
       Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
-  CHECK(!shared_info.is_null());  // TODO(rmcilroy): Set stack overflow?
+  if (shared_info.is_null()) {
+    return SetStackOverflow();
+  }
   builder()->CreateClosure(shared_info,
                            expr->pretenure() ? TENURED : NOT_TENURED);
   execution_result()->SetResultInAccumulator();
@@ -1679,11 +1702,6 @@
     }
   }
 
-  // Transform literals that contain functions to fast properties.
-  if (expr->has_function()) {
-    builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
-  }
-
   execution_result()->SetResultInRegister(literal);
 }
 
@@ -1729,6 +1747,7 @@
 
 
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
+  builder()->SetExpressionPosition(proxy);
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
 }
 
@@ -2173,6 +2192,7 @@
   }
 
   // Store the value.
+  builder()->SetExpressionPosition(expr);
   FeedbackVectorSlot slot = expr->AssignmentSlot();
   switch (assign_type) {
     case VARIABLE: {
@@ -2210,6 +2230,7 @@
 
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
+  builder()->SetExpressionPosition(expr);
   builder()->Throw();
   // Throw statments are modeled as expression instead of statments. These are
   // converted from assignment statements in Rewriter::ReWrite pass. An
@@ -2222,6 +2243,7 @@
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
   LhsKind property_kind = Property::GetAssignType(expr);
   FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  builder()->SetExpressionPosition(expr);
   switch (property_kind) {
     case VARIABLE:
       UNREACHABLE();
@@ -2718,9 +2740,7 @@
   }
 
   // Convert old value into a number.
-  if (!is_strong(language_mode())) {
-    builder()->CastAccumulatorToNumber();
-  }
+  builder()->CastAccumulatorToNumber();
 
   // Save result for postfix expressions.
   if (is_postfix) {
@@ -2732,6 +2752,7 @@
   builder()->CountOperation(expr->binary_op());
 
   // Store the value.
+  builder()->SetExpressionPosition(expr);
   FeedbackVectorSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
@@ -2791,6 +2812,7 @@
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
+  builder()->SetExpressionPosition(expr);
   builder()->CompareOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
@@ -3129,12 +3151,12 @@
 
 
 LanguageMode BytecodeGenerator::language_mode() const {
-  return info()->language_mode();
+  return execution_context()->scope()->language_mode();
 }
 
 
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
-  return info()->feedback_vector()->GetIndex(slot);
+  return info()->shared_info()->feedback_vector()->GetIndex(slot);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 0a617c0..9bdde9a 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -95,17 +95,6 @@
       start = run_end;
       run_length = 0;
     }
-    Register reg_start(*start);
-    Register reg_expected(expected);
-    if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
-        RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
-      // Run straddles the lower edge of the translation window. Registers
-      // after the start of this boundary are displaced by the register
-      // translator to provide a hole for translation. Runs either side
-      // of the boundary are fine.
-      start = run_end;
-      run_length = 0;
-    }
     if (++run_length == count) {
       return *start;
     }
@@ -121,16 +110,6 @@
   // Pad temporaries if extended run would cross translation boundary.
   Register reg_first(*start);
   Register reg_last(*start + static_cast<int>(count) - 1);
-  DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
-            RegisterTranslator::DistanceToTranslationWindow(reg_last));
-  while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
-         RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
-    auto pos_insert_pair =
-        free_temporaries_.insert(AllocateTemporaryRegister());
-    reg_first = Register(*pos_insert_pair.first);
-    reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
-    run_length = 0;
-  }
 
   // Ensure enough registers for run.
   while (run_length++ < count) {
@@ -139,10 +118,6 @@
 
   int run_start =
       last_temporary_register().index() - static_cast<int>(count) + 1;
-  DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
-             0 ||
-         RegisterTranslator::DistanceToTranslationWindow(
-             Register(run_start + static_cast<int>(count) - 1)) > 0);
   return run_start;
 }
 
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index b813605..c724827 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -11,22 +11,35 @@
 namespace internal {
 namespace interpreter {
 
-// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+template <OperandTypeInfo>
+struct OperandTypeInfoTraits {
+  static const bool kIsScalable = false;
+  static const bool kIsUnsigned = false;
+  static const OperandSize kUnscaledSize = OperandSize::kNone;
+};
 
-// Template helpers to deduce the number of operands each bytecode has.
-#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+#define DECLARE_OPERAND_TYPE_INFO(Name, Scalable, Unsigned, BaseSize) \
+  template <>                                                         \
+  struct OperandTypeInfoTraits<OperandTypeInfo::k##Name> {            \
+    static const bool kIsScalable = Scalable;                         \
+    static const bool kIsUnsigned = Unsigned;                         \
+    static const OperandSize kUnscaledSize = BaseSize;                \
+  };
+OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
 
 template <OperandType>
-struct OperandTraits {};
+struct OperandTraits {
+  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+};
 
-#define DECLARE_OPERAND_SIZE(Name, Size)             \
-  template <>                                        \
-  struct OperandTraits<OperandType::k##Name> {       \
-    static const OperandSize kSizeType = Size;       \
-    static const int kSize = static_cast<int>(Size); \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)   \
+  template <>                                         \
+  struct OperandTraits<OperandType::k##Name> {        \
+    typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
   };
-OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
-#undef DECLARE_OPERAND_SIZE
+OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
+#undef DECLARE_OPERAND_TYPE_TRAITS
 
 template <OperandType>
 struct RegisterOperandTraits {
@@ -41,13 +54,13 @@
 REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
 #undef DECLARE_REGISTER_OPERAND
 
-template <OperandType... Args>
+template <AccumulatorUse, OperandType...>
 struct BytecodeTraits {};
 
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
-          OperandType operand_3>
-struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
-                      OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1, OperandType operand_2, OperandType operand_3>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
+                      operand_3> {
   static OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i < kOperandCount);
     const OperandType kOperands[] = {operand_0, operand_1, operand_2,
@@ -55,32 +68,20 @@
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType,
-         OperandTraits<operand_2>::kSizeType,
-         OperandTraits<operand_3>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
-    const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
            operand_3 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable |
+            OperandTraits<operand_2>::TypeInfo::kIsScalable |
+            OperandTraits<operand_3>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 4;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -92,42 +93,29 @@
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
       (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
       (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
-      OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
 };
 
-template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
-struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i <= 2);
     const OperandType kOperands[] = {operand_0, operand_1, operand_2};
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType,
-         OperandTraits<operand_2>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot || operand_2 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable |
+            OperandTraits<operand_2>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 3;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -137,40 +125,28 @@
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
       (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
-      OperandTraits<operand_2>::kSize;
 };
 
-template <OperandType operand_0, OperandType operand_1>
-struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0,
+          OperandType operand_1>
+struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(0 <= i && i < kOperandCount);
     const OperandType kOperands[] = {operand_0, operand_1};
     return kOperands[i];
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandSize kOperandSizes[] =
-        {OperandTraits<operand_0>::kSizeType,
-         OperandTraits<operand_1>::kSizeType};
-    return kOperandSizes[i];
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const int kOffset0 = 1;
-    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
-    const int kOperandOffsets[] = {kOffset0, kOffset1};
-    return kOperandOffsets[i];
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot || operand_1 == ot;
   }
 
+  static inline bool IsScalable() {
+    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
+            OperandTraits<operand_1>::TypeInfo::kIsScalable);
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 2;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
@@ -178,68 +154,91 @@
   static const int kRegisterOperandBitmap =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand +
       (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
-  static const int kSize =
-      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
 };
 
-template <OperandType operand_0>
-struct BytecodeTraits<operand_0, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use, OperandType operand_0>
+struct BytecodeTraits<accumulator_use, operand_0> {
   static inline OperandType GetOperandType(int i) {
     DCHECK(i == 0);
     return operand_0;
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    DCHECK(i == 0);
-    return OperandTraits<operand_0>::kSizeType;
-  }
-
-  static inline int GetOperandOffset(int i) {
-    DCHECK(i == 0);
-    return 1;
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return operand_0 == ot;
   }
 
+  static inline bool IsScalable() {
+    return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+  }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 1;
   static const int kRegisterOperandCount =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand;
   static const int kRegisterOperandBitmap =
       RegisterOperandTraits<operand_0>::kIsRegisterOperand;
-  static const int kSize = 1 + OperandTraits<operand_0>::kSize;
 };
 
-template <>
-struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+template <AccumulatorUse accumulator_use>
+struct BytecodeTraits<accumulator_use> {
   static inline OperandType GetOperandType(int i) {
     UNREACHABLE();
     return OperandType::kNone;
   }
 
-  static inline OperandSize GetOperandSize(int i) {
-    UNREACHABLE();
-    return OperandSize::kNone;
-  }
-
-  static inline int GetOperandOffset(int i) {
-    UNREACHABLE();
-    return 1;
-  }
-
   template <OperandType ot>
   static inline bool HasAnyOperandsOfType() {
     return false;
   }
 
+  static inline bool IsScalable() { return false; }
+
+  static const AccumulatorUse kAccumulatorUse = accumulator_use;
   static const int kOperandCount = 0;
   static const int kRegisterOperandCount = 0;
   static const int kRegisterOperandBitmap = 0;
-  static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
 };
 
+template <bool>
+struct OperandScaler {
+  static int Multiply(int size, int operand_scale) { return 0; }
+};
+
+template <>
+struct OperandScaler<false> {
+  static int Multiply(int size, int operand_scale) { return size; }
+};
+
+template <>
+struct OperandScaler<true> {
+  static int Multiply(int size, int operand_scale) {
+    return size * operand_scale;
+  }
+};
+
+static OperandSize ScaledOperandSize(OperandType operand_type,
+                                     OperandScale operand_scale) {
+  switch (operand_type) {
+#define CASE(Name, TypeInfo)                                                   \
+  case OperandType::k##Name: {                                                 \
+    OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize;    \
+    int size =                                                                 \
+        OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
+            static_cast<int>(base_size), static_cast<int>(operand_scale));     \
+    OperandSize operand_size = static_cast<OperandSize>(size);                 \
+    DCHECK(operand_size == OperandSize::kByte ||                               \
+           operand_size == OperandSize::kShort ||                              \
+           operand_size == OperandSize::kQuad);                                \
+    return operand_size;                                                       \
+  }
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandSize::kNone;
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index c3b17c7..fd27f39 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -4,8 +4,11 @@
 
 #include "src/interpreter/bytecodes.h"
 
+#include <iomanip>
+
 #include "src/frames.h"
 #include "src/interpreter/bytecode-traits.h"
+#include "src/interpreter/interpreter.h"
 
 namespace v8 {
 namespace internal {
@@ -25,6 +28,35 @@
   return "";
 }
 
+// static
+std::string Bytecodes::ToString(Bytecode bytecode, OperandScale operand_scale) {
+  static const char kSeparator = '.';
+
+  std::string value(ToString(bytecode));
+  if (operand_scale > OperandScale::kSingle) {
+    Bytecode prefix_bytecode = OperandScaleToPrefixBytecode(operand_scale);
+    std::string suffix = ToString(prefix_bytecode);
+    return value.append(1, kSeparator).append(suffix);
+  } else {
+    return value;
+  }
+}
+
+// static
+const char* Bytecodes::AccumulatorUseToString(AccumulatorUse accumulator_use) {
+  switch (accumulator_use) {
+    case AccumulatorUse::kNone:
+      return "None";
+    case AccumulatorUse::kRead:
+      return "Read";
+    case AccumulatorUse::kWrite:
+      return "Write";
+    case AccumulatorUse::kReadWrite:
+      return "ReadWrite";
+  }
+  UNREACHABLE();
+  return "";
+}
 
 // static
 const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
@@ -39,6 +71,20 @@
   return "";
 }
 
+// static
+const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
+  switch (operand_scale) {
+    case OperandScale::kSingle:
+      return "Single";
+    case OperandScale::kDouble:
+      return "Double";
+    case OperandScale::kQuadruple:
+      return "Quadruple";
+    case OperandScale::kInvalid:
+      UNREACHABLE();
+  }
+  return "";
+}
 
 // static
 const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
@@ -49,6 +95,8 @@
       return "Byte";
     case OperandSize::kShort:
       return "Short";
+    case OperandSize::kQuad:
+      return "Quad";
   }
   UNREACHABLE();
   return "";
@@ -72,31 +120,34 @@
 
 // static
 Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
-  switch (Size(bytecode)) {
-#define CASE(Name, ...)                                  \
-  case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
-    return Bytecode::k##Name;
-    DEBUG_BREAK_BYTECODE_LIST(CASE)
-#undef CASE
-    default:
-      break;
+  DCHECK(!IsDebugBreak(bytecode));
+  if (bytecode == Bytecode::kWide) {
+    return Bytecode::kDebugBreakWide;
   }
+  if (bytecode == Bytecode::kExtraWide) {
+    return Bytecode::kDebugBreakExtraWide;
+  }
+  int bytecode_size = Size(bytecode, OperandScale::kSingle);
+#define RETURN_IF_DEBUG_BREAK_SIZE_MATCHES(Name, ...)                    \
+  if (bytecode_size == Size(Bytecode::k##Name, OperandScale::kSingle)) { \
+    return Bytecode::k##Name;                                            \
+  }
+  DEBUG_BREAK_PLAIN_BYTECODE_LIST(RETURN_IF_DEBUG_BREAK_SIZE_MATCHES)
+#undef RETURN_IF_DEBUG_BREAK_SIZE_MATCHES
   UNREACHABLE();
-  return static_cast<Bytecode>(-1);
+  return Bytecode::kIllegal;
 }
 
 // static
-int Bytecodes::Size(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
-    BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
+  int size = 1;
+  for (int i = 0; i < NumberOfOperands(bytecode); i++) {
+    OperandSize operand_size = GetOperandSize(bytecode, i, operand_scale);
+    int delta = static_cast<int>(operand_size);
+    DCHECK(base::bits::IsPowerOfTwo32(static_cast<uint32_t>(delta)));
+    size += delta;
   }
-  UNREACHABLE();
-  return 0;
+  return size;
 }
 
 
@@ -106,7 +157,7 @@
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+    return BytecodeTraits<__VA_ARGS__>::kOperandCount;
     BYTECODE_LIST(CASE)
 #undef CASE
   }
@@ -119,9 +170,9 @@
 int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
-#define CASE(Name, ...)                                            \
-  case Bytecode::k##Name:                                          \
-    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
     return Name##Trait::kRegisterOperandCount;
     BYTECODE_LIST(CASE)
 #undef CASE
@@ -131,12 +182,71 @@
 }
 
 // static
+Bytecode Bytecodes::OperandScaleToPrefixBytecode(OperandScale operand_scale) {
+  switch (operand_scale) {
+    case OperandScale::kQuadruple:
+      return Bytecode::kExtraWide;
+    case OperandScale::kDouble:
+      return Bytecode::kWide;
+    default:
+      UNREACHABLE();
+      return Bytecode::kIllegal;
+  }
+}
+
+// static
+bool Bytecodes::OperandScaleRequiresPrefixBytecode(OperandScale operand_scale) {
+  return operand_scale != OperandScale::kSingle;
+}
+
+// static
+OperandScale Bytecodes::PrefixBytecodeToOperandScale(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kExtraWide:
+    case Bytecode::kDebugBreakExtraWide:
+      return OperandScale::kQuadruple;
+    case Bytecode::kWide:
+    case Bytecode::kDebugBreakWide:
+      return OperandScale::kDouble;
+    default:
+      UNREACHABLE();
+      return OperandScale::kSingle;
+  }
+}
+
+// static
+AccumulatorUse Bytecodes::GetAccumulatorUse(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__>::kAccumulatorUse;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return AccumulatorUse::kNone;
+}
+
+// static
+bool Bytecodes::ReadsAccumulator(Bytecode bytecode) {
+  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
+         AccumulatorUse::kRead;
+}
+
+// static
+bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
+  return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
+         AccumulatorUse::kWrite;
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+    return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
     BYTECODE_LIST(CASE)
 #undef CASE
   }
@@ -144,29 +254,20 @@
   return OperandType::kNone;
 }
 
-
 // static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
-    BYTECODE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return OperandSize::kNone;
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+                                      OperandScale operand_scale) {
+  OperandType op_type = GetOperandType(bytecode, i);
+  return ScaledOperandSize(op_type, operand_scale);
 }
 
-
 // static
 int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
-#define CASE(Name, ...)                                            \
-  case Bytecode::k##Name:                                          \
-    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
     return Name##Trait::kRegisterOperandBitmap;
     BYTECODE_LIST(CASE)
 #undef CASE
@@ -176,34 +277,25 @@
 }
 
 // static
-int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  switch (bytecode) {
-#define CASE(Name, ...)   \
-  case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
-    BYTECODE_LIST(CASE)
-#undef CASE
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
+                                OperandScale operand_scale) {
+  // TODO(oth): restore this to a statically determined constant.
+  int offset = 1;
+  for (int operand_index = 0; operand_index < i; ++operand_index) {
+    OperandSize operand_size =
+        GetOperandSize(bytecode, operand_index, operand_scale);
+    offset += static_cast<int>(operand_size);
   }
-  UNREACHABLE();
-  return 0;
+  return offset;
 }
 
-
 // static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
-  switch (operand_type) {
-#define CASE(Name, Size)     \
-  case OperandType::k##Name: \
-    return Size;
-    OPERAND_TYPE_LIST(CASE)
-#undef CASE
-  }
-  UNREACHABLE();
-  return OperandSize::kNone;
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
+                                     OperandScale operand_scale) {
+  return static_cast<OperandSize>(
+      ScaledOperandSize(operand_type, operand_scale));
 }
 
-
 // static
 bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
   return bytecode == Bytecode::kJumpIfTrue ||
@@ -227,24 +319,10 @@
          bytecode == Bytecode::kJumpIfUndefinedConstant;
 }
 
-
-// static
-bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpIfTrueConstantWide ||
-         bytecode == Bytecode::kJumpIfFalseConstantWide ||
-         bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
-         bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
-         bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
-         bytecode == Bytecode::kJumpIfNullConstantWide ||
-         bytecode == Bytecode::kJumpIfUndefinedConstantWide;
-}
-
-
 // static
 bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
   return IsConditionalJumpImmediate(bytecode) ||
-         IsConditionalJumpConstant(bytecode) ||
-         IsConditionalJumpConstantWide(bytecode);
+         IsConditionalJumpConstant(bytecode);
 }
 
 
@@ -260,26 +338,23 @@
          IsConditionalJumpConstant(bytecode);
 }
 
-
-// static
-bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
-  return bytecode == Bytecode::kJumpConstantWide ||
-         IsConditionalJumpConstantWide(bytecode);
-}
-
-
 // static
 bool Bytecodes::IsJump(Bytecode bytecode) {
-  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
-         IsJumpConstantWide(bytecode);
+  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
 }
 
 
 // static
 bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
   return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
-         bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
-         bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+         bytecode == Bytecode::kNew;
+}
+
+// static
+bool Bytecodes::IsCallRuntime(Bytecode bytecode) {
+  return bytecode == Bytecode::kCallRuntime ||
+         bytecode == Bytecode::kCallRuntimeForPair ||
+         bytecode == Bytecode::kInvokeIntrinsic;
 }
 
 // static
@@ -296,31 +371,40 @@
 }
 
 // static
+bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
+  switch (bytecode) {
+#define CASE(Name, ...)                              \
+  case Bytecode::k##Name:                            \
+    typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
+    return Name##Trait::IsScalable();
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return false;
+}
+
+// static
+bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kExtraWide:
+    case Bytecode::kDebugBreakExtraWide:
+    case Bytecode::kWide:
+    case Bytecode::kDebugBreakWide:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
 bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn || IsJump(bytecode);
 }
 
 // static
-bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
-  return operand_type == OperandType::kIdx8 ||
-         operand_type == OperandType::kIdx16;
-}
-
-// static
-bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
-  return operand_type == OperandType::kImm8;
-}
-
-// static
-bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
-  return (operand_type == OperandType::kRegCount8 ||
-          operand_type == OperandType::kRegCount16);
-}
-
-// static
 bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
-  return (operand_type == OperandType::kMaybeReg8 ||
-          operand_type == OperandType::kMaybeReg16);
+  return operand_type == OperandType::kMaybeReg;
 }
 
 // static
@@ -376,41 +460,102 @@
   return false;
 }
 
-namespace {
-static Register DecodeRegister(const uint8_t* operand_start,
-                               OperandType operand_type) {
-  switch (Bytecodes::SizeOfOperand(operand_type)) {
-    case OperandSize::kByte:
-      return Register::FromOperand(*operand_start);
-    case OperandSize::kShort:
-      return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
-    case OperandSize::kNone: {
-      UNREACHABLE();
-    }
+// static
+bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
   }
-  return Register();
+  UNREACHABLE();
+  return false;
 }
-}  // namespace
 
+// static
+OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
+  DCHECK(operand_scale >= OperandScale::kSingle &&
+         operand_scale <= OperandScale::kMaxValid);
+  return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
+}
+
+// static
+Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
+                                          OperandType operand_type,
+                                          OperandScale operand_scale) {
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  int32_t operand =
+      DecodeSignedOperand(operand_start, operand_type, operand_scale);
+  return Register::FromOperand(operand);
+}
+
+// static
+int32_t Bytecodes::DecodeSignedOperand(const uint8_t* operand_start,
+                                       OperandType operand_type,
+                                       OperandScale operand_scale) {
+  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+  switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    case OperandSize::kByte:
+      return static_cast<int8_t>(*operand_start);
+    case OperandSize::kShort:
+      return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+    case OperandSize::kQuad:
+      return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
+
+// static
+uint32_t Bytecodes::DecodeUnsignedOperand(const uint8_t* operand_start,
+                                          OperandType operand_type,
+                                          OperandScale operand_scale) {
+  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+  switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    case OperandSize::kByte:
+      return *operand_start;
+    case OperandSize::kShort:
+      return ReadUnalignedUInt16(operand_start);
+    case OperandSize::kQuad:
+      return ReadUnalignedUInt32(operand_start);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return 0;
+}
 
 // static
 std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
                                 int parameter_count) {
-  Vector<char> buf = Vector<char>::New(50);
-
   Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
-  int bytecode_size = Bytecodes::Size(bytecode);
-
-  for (int i = 0; i < bytecode_size; i++) {
-    SNPrintF(buf, "%02x ", bytecode_start[i]);
-    os << buf.start();
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (IsPrefixScalingBytecode(bytecode)) {
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
+    bytecode = Bytecodes::FromByte(bytecode_start[1]);
   }
+
+  // Prepare to print bytecode and operands as hex digits.
+  std::ios saved_format(nullptr);
+  saved_format.copyfmt(saved_format);
+  os.fill('0');
+  os.flags(std::ios::hex);
+
+  int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
+  for (int i = 0; i < prefix_offset + bytecode_size; i++) {
+    os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
+  }
+  os.copyfmt(saved_format);
+
   const int kBytecodeColumnSize = 6;
-  for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
+  for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
     os << "   ";
   }
 
-  os << bytecode << " ";
+  os << Bytecodes::ToString(bytecode, operand_scale) << " ";
 
   // Operands for the debug break are from the original instruction.
   if (IsDebugBreak(bytecode)) return os;
@@ -420,42 +565,42 @@
   for (int i = 0; i < number_of_operands; i++) {
     OperandType op_type = GetOperandType(bytecode, i);
     const uint8_t* operand_start =
-        &bytecode_start[GetOperandOffset(bytecode, i)];
+        &bytecode_start[prefix_offset +
+                        GetOperandOffset(bytecode, i, operand_scale)];
     switch (op_type) {
-      case interpreter::OperandType::kRegCount8:
-        os << "#" << static_cast<unsigned int>(*operand_start);
+      case interpreter::OperandType::kRegCount:
+        os << "#"
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
         break;
-      case interpreter::OperandType::kRegCount16:
-        os << '#' << ReadUnalignedUInt16(operand_start);
+      case interpreter::OperandType::kIdx:
+      case interpreter::OperandType::kRuntimeId:
+        os << "["
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
+           << "]";
         break;
-      case interpreter::OperandType::kIdx8:
-        os << "[" << static_cast<unsigned int>(*operand_start) << "]";
+      case interpreter::OperandType::kImm:
+        os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
+           << "]";
         break;
-      case interpreter::OperandType::kIdx16:
-        os << "[" << ReadUnalignedUInt16(operand_start) << "]";
+      case interpreter::OperandType::kFlag8:
+        os << "#"
+           << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
         break;
-      case interpreter::OperandType::kImm8:
-        os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
-        break;
-      case interpreter::OperandType::kMaybeReg8:
-      case interpreter::OperandType::kMaybeReg16:
-      case interpreter::OperandType::kReg8:
-      case interpreter::OperandType::kReg16:
-      case interpreter::OperandType::kRegOut8:
-      case interpreter::OperandType::kRegOut16: {
-        Register reg = DecodeRegister(operand_start, op_type);
+      case interpreter::OperandType::kMaybeReg:
+      case interpreter::OperandType::kReg:
+      case interpreter::OperandType::kRegOut: {
+        Register reg =
+            DecodeRegisterOperand(operand_start, op_type, operand_scale);
         os << reg.ToString(parameter_count);
         break;
       }
-      case interpreter::OperandType::kRegOutTriple8:
-      case interpreter::OperandType::kRegOutTriple16:
+      case interpreter::OperandType::kRegOutTriple:
         range += 1;
-      case interpreter::OperandType::kRegOutPair8:
-      case interpreter::OperandType::kRegOutPair16:
-      case interpreter::OperandType::kRegPair8:
-      case interpreter::OperandType::kRegPair16: {
+      case interpreter::OperandType::kRegOutPair:
+      case interpreter::OperandType::kRegPair: {
         range += 1;
-        Register first_reg = DecodeRegister(operand_start, op_type);
+        Register first_reg =
+            DecodeRegisterOperand(operand_start, op_type, operand_scale);
         Register last_reg = Register(first_reg.index() + range);
         os << first_reg.ToString(parameter_count) << "-"
            << last_reg.ToString(parameter_count);
@@ -472,20 +617,33 @@
   return os;
 }
 
+// static
+bool Bytecodes::BytecodeHasHandler(Bytecode bytecode,
+                                   OperandScale operand_scale) {
+  return operand_scale == OperandScale::kSingle ||
+         Bytecodes::IsBytecodeWithScalableOperands(bytecode);
+}
+
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
   return os << Bytecodes::ToString(bytecode);
 }
 
-
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
-  return os << Bytecodes::OperandTypeToString(operand_type);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use) {
+  return os << Bytecodes::AccumulatorUseToString(use);
 }
 
-
 std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
   return os << Bytecodes::OperandSizeToString(operand_size);
 }
 
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale) {
+  return os << Bytecodes::OperandScaleToString(operand_scale);
+}
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+  return os << Bytecodes::OperandTypeToString(operand_type);
+}
+
 static const int kLastParamRegisterIndex =
     -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
 static const int kFunctionClosureRegisterIndex =
@@ -495,29 +653,17 @@
 static const int kNewTargetRegisterIndex =
     -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
 
-// The register space is a signed 16-bit space. Register operands
-// occupy range above 0. Parameter indices are biased with the
-// negative value kLastParamRegisterIndex for ease of access in the
-// interpreter.
-static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
-static const int kMaxRegisterIndex = -kMinInt16;
-static const int kMaxReg8Index = -kMinInt8;
-static const int kMinReg8Index = -kMaxInt8;
-static const int kMaxReg16Index = -kMinInt16;
-static const int kMinReg16Index = -kMaxInt16;
-
 bool Register::is_byte_operand() const {
-  return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+  return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
 }
 
 bool Register::is_short_operand() const {
-  return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+  return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
 }
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, parameter_count);
-  DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
   int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
   DCHECK_LT(register_index, 0);
   return Register(register_index);
@@ -557,44 +703,6 @@
   return index() == kNewTargetRegisterIndex;
 }
 
-int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-
-int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
-
-int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
-
-uint8_t Register::ToOperand() const {
-  DCHECK(is_byte_operand());
-  return static_cast<uint8_t>(-index_);
-}
-
-
-Register Register::FromOperand(uint8_t operand) {
-  return Register(-static_cast<int8_t>(operand));
-}
-
-
-uint16_t Register::ToWideOperand() const {
-  DCHECK(is_short_operand());
-  return static_cast<uint16_t>(-index_);
-}
-
-
-Register Register::FromWideOperand(uint16_t operand) {
-  return Register(-static_cast<int16_t>(operand));
-}
-
-
-uint32_t Register::ToRawOperand() const {
-  return static_cast<uint32_t>(-index_);
-}
-
-
-Register Register::FromRawOperand(uint32_t operand) {
-  return Register(-static_cast<int32_t>(operand));
-}
-
-
 bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
                              Register reg4, Register reg5) {
   if (reg1.index() + 1 != reg2.index()) {
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index d4863b1..2361271 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -15,37 +15,24 @@
 namespace internal {
 namespace interpreter {
 
-#define INVALID_OPERAND_TYPE_LIST(V) \
-  V(None, OperandSize::kNone)
+#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
 
-#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */                      \
-  V(MaybeReg8, OperandSize::kByte)          \
-  V(Reg8, OperandSize::kByte)               \
-  V(RegPair8, OperandSize::kByte)           \
-  /* Short operands. */                     \
-  V(MaybeReg16, OperandSize::kShort)        \
-  V(Reg16, OperandSize::kShort)             \
-  V(RegPair16, OperandSize::kShort)
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V)         \
+  V(MaybeReg, OperandTypeInfo::kScalableSignedByte) \
+  V(Reg, OperandTypeInfo::kScalableSignedByte)      \
+  V(RegPair, OperandTypeInfo::kScalableSignedByte)
 
-#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */                       \
-  V(RegOut8, OperandSize::kByte)             \
-  V(RegOutPair8, OperandSize::kByte)         \
-  V(RegOutTriple8, OperandSize::kByte)       \
-  /* Short operands. */                      \
-  V(RegOut16, OperandSize::kShort)           \
-  V(RegOutPair16, OperandSize::kShort)       \
-  V(RegOutTriple16, OperandSize::kShort)
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)          \
+  V(RegOut, OperandTypeInfo::kScalableSignedByte)     \
+  V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
+  V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
 
-#define SCALAR_OPERAND_TYPE_LIST(V) \
-  /* Byte operands. */              \
-  V(Idx8, OperandSize::kByte)       \
-  V(Imm8, OperandSize::kByte)       \
-  V(RegCount8, OperandSize::kByte)  \
-  /* Short operands. */             \
-  V(Idx16, OperandSize::kShort)     \
-  V(RegCount16, OperandSize::kShort)
+#define SCALAR_OPERAND_TYPE_LIST(V)                   \
+  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
+  V(Imm, OperandTypeInfo::kScalableSignedByte)        \
+  V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
 
 #define REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
@@ -60,235 +47,258 @@
   NON_REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_OPERAND_TYPE_LIST(V)
 
-// Define one debug break bytecode for each operands size.
-#define DEBUG_BREAK_BYTECODE_LIST(V)                                           \
-  V(DebugBreak0, OperandType::kNone)                                           \
-  V(DebugBreak1, OperandType::kReg8)                                           \
-  V(DebugBreak2, OperandType::kReg16)                                          \
-  V(DebugBreak3, OperandType::kReg16, OperandType::kReg8)                      \
-  V(DebugBreak4, OperandType::kReg16, OperandType::kReg16)                     \
-  V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
-  V(DebugBreak6, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16)                                                       \
-  V(DebugBreak7, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16, OperandType::kReg8)                                   \
-  V(DebugBreak8, OperandType::kReg16, OperandType::kReg16,                     \
-    OperandType::kReg16, OperandType::kReg16)
+// Define one debug break bytecode for each possible size of unscaled
+// bytecodes. Format is V(<bytecode>, <accumulator_use>, <operands>).
+#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V)                                    \
+  V(DebugBreak0, AccumulatorUse::kRead)                                       \
+  V(DebugBreak1, AccumulatorUse::kRead, OperandType::kReg)                    \
+  V(DebugBreak2, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg) \
+  V(DebugBreak3, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+    OperandType::kReg)                                                        \
+  V(DebugBreak4, AccumulatorUse::kRead, OperandType::kReg, OperandType::kReg, \
+    OperandType::kReg, OperandType::kReg)                                     \
+  V(DebugBreak5, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
+    OperandType::kReg, OperandType::kReg)                                     \
+  V(DebugBreak6, AccumulatorUse::kRead, OperandType::kRuntimeId,              \
+    OperandType::kReg, OperandType::kReg, OperandType::kReg)
+
+// Define one debug break for each widening prefix.
+#define DEBUG_BREAK_PREFIX_BYTECODE_LIST(V) \
+  V(DebugBreakWide, AccumulatorUse::kRead)  \
+  V(DebugBreakExtraWide, AccumulatorUse::kRead)
+
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
+  DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
 // The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V)                                                       \
-                                                                               \
-  /* Loading the accumulator */                                                \
-  V(LdaZero, OperandType::kNone)                                               \
-  V(LdaSmi8, OperandType::kImm8)                                               \
-  V(LdaUndefined, OperandType::kNone)                                          \
-  V(LdaNull, OperandType::kNone)                                               \
-  V(LdaTheHole, OperandType::kNone)                                            \
-  V(LdaTrue, OperandType::kNone)                                               \
-  V(LdaFalse, OperandType::kNone)                                              \
-  V(LdaConstant, OperandType::kIdx8)                                           \
-  V(LdaConstantWide, OperandType::kIdx16)                                      \
-                                                                               \
-  /* Globals */                                                                \
-  V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8)                         \
-  V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8)             \
-  V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16)                   \
-  V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16)       \
-  V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
-                                                                               \
-  /* Context operations */                                                     \
-  V(PushContext, OperandType::kReg8)                                           \
-  V(PopContext, OperandType::kReg8)                                            \
-  V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
-  V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
-  V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
-  V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
-                                                                               \
-  /* Load-Store lookup slots */                                                \
-  V(LdaLookupSlot, OperandType::kIdx8)                                         \
-  V(LdaLookupSlotInsideTypeof, OperandType::kIdx8)                             \
-  V(LdaLookupSlotWide, OperandType::kIdx16)                                    \
-  V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16)                        \
-  V(StaLookupSlotSloppy, OperandType::kIdx8)                                   \
-  V(StaLookupSlotStrict, OperandType::kIdx8)                                   \
-  V(StaLookupSlotSloppyWide, OperandType::kIdx16)                              \
-  V(StaLookupSlotStrictWide, OperandType::kIdx16)                              \
-                                                                               \
-  /* Register-accumulator transfers */                                         \
-  V(Ldar, OperandType::kReg8)                                                  \
-  V(Star, OperandType::kRegOut8)                                               \
-                                                                               \
-  /* Register-register transfers */                                            \
-  V(Mov, OperandType::kReg8, OperandType::kRegOut8)                            \
-  V(MovWide, OperandType::kReg16, OperandType::kRegOut16)                      \
-                                                                               \
-  /* LoadIC operations */                                                      \
-  V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)        \
-  V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8)                       \
-  V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16)  \
-  V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16)                  \
-                                                                               \
-  /* StoreIC operations */                                                     \
-  V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
-  V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
-  V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8,                \
-    OperandType::kIdx8)                                                        \
-  V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8,                \
-    OperandType::kIdx8)                                                        \
-  V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                \
-    OperandType::kIdx16)                                                       \
-  V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16,                \
-    OperandType::kIdx16)                                                       \
-  V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8,            \
-    OperandType::kIdx16)                                                       \
-  V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8,            \
-    OperandType::kIdx16)                                                       \
-                                                                               \
-  /* Binary Operators */                                                       \
-  V(Add, OperandType::kReg8)                                                   \
-  V(Sub, OperandType::kReg8)                                                   \
-  V(Mul, OperandType::kReg8)                                                   \
-  V(Div, OperandType::kReg8)                                                   \
-  V(Mod, OperandType::kReg8)                                                   \
-  V(BitwiseOr, OperandType::kReg8)                                             \
-  V(BitwiseXor, OperandType::kReg8)                                            \
-  V(BitwiseAnd, OperandType::kReg8)                                            \
-  V(ShiftLeft, OperandType::kReg8)                                             \
-  V(ShiftRight, OperandType::kReg8)                                            \
-  V(ShiftRightLogical, OperandType::kReg8)                                     \
-                                                                               \
-  /* Unary Operators */                                                        \
-  V(Inc, OperandType::kNone)                                                   \
-  V(Dec, OperandType::kNone)                                                   \
-  V(LogicalNot, OperandType::kNone)                                            \
-  V(TypeOf, OperandType::kNone)                                                \
-  V(DeletePropertyStrict, OperandType::kReg8)                                  \
-  V(DeletePropertySloppy, OperandType::kReg8)                                  \
-                                                                               \
-  /* Call operations */                                                        \
-  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8,     \
-    OperandType::kIdx8)                                                        \
-  V(CallWide, OperandType::kReg16, OperandType::kReg16,                        \
-    OperandType::kRegCount16, OperandType::kIdx16)                             \
-  V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
-    OperandType::kIdx8)                                                        \
-  V(TailCallWide, OperandType::kReg16, OperandType::kReg16,                    \
-    OperandType::kRegCount16, OperandType::kIdx16)                             \
-  V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8,                 \
-    OperandType::kRegCount8)                                                   \
-  V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16,            \
-    OperandType::kRegCount8)                                                   \
-  V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8,          \
-    OperandType::kRegCount8, OperandType::kRegOutPair8)                        \
-  V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16,     \
-    OperandType::kRegCount8, OperandType::kRegOutPair16)                       \
-  V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8,                    \
-    OperandType::kRegCount8)                                                   \
-  V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16,               \
-    OperandType::kRegCount16)                                                  \
-                                                                               \
-  /* New operator */                                                           \
-  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
-  V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16,                    \
-    OperandType::kRegCount16)                                                  \
-                                                                               \
-  /* Test Operators */                                                         \
-  V(TestEqual, OperandType::kReg8)                                             \
-  V(TestNotEqual, OperandType::kReg8)                                          \
-  V(TestEqualStrict, OperandType::kReg8)                                       \
-  V(TestNotEqualStrict, OperandType::kReg8)                                    \
-  V(TestLessThan, OperandType::kReg8)                                          \
-  V(TestGreaterThan, OperandType::kReg8)                                       \
-  V(TestLessThanOrEqual, OperandType::kReg8)                                   \
-  V(TestGreaterThanOrEqual, OperandType::kReg8)                                \
-  V(TestInstanceOf, OperandType::kReg8)                                        \
-  V(TestIn, OperandType::kReg8)                                                \
-                                                                               \
-  /* Cast operators */                                                         \
-  V(ToName, OperandType::kNone)                                                \
-  V(ToNumber, OperandType::kNone)                                              \
-  V(ToObject, OperandType::kNone)                                              \
-                                                                               \
-  /* Literals */                                                               \
-  V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
-    OperandType::kImm8)                                                        \
-  V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8,                \
-    OperandType::kImm8)                                                        \
-  V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
-    OperandType::kImm8)                                                        \
-  V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
-    OperandType::kImm8)                                                        \
-  V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16,          \
-    OperandType::kImm8)                                                        \
-  V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
-    OperandType::kImm8)                                                        \
-                                                                               \
-  /* Closure allocation */                                                     \
-  V(CreateClosure, OperandType::kIdx8, OperandType::kImm8)                     \
-  V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8)                \
-                                                                               \
-  /* Arguments allocation */                                                   \
-  V(CreateMappedArguments, OperandType::kNone)                                 \
-  V(CreateUnmappedArguments, OperandType::kNone)                               \
-  V(CreateRestParameter, OperandType::kNone)                                   \
-                                                                               \
-  /* Control Flow */                                                           \
-  V(Jump, OperandType::kImm8)                                                  \
-  V(JumpConstant, OperandType::kIdx8)                                          \
-  V(JumpConstantWide, OperandType::kIdx16)                                     \
-  V(JumpIfTrue, OperandType::kImm8)                                            \
-  V(JumpIfTrueConstant, OperandType::kIdx8)                                    \
-  V(JumpIfTrueConstantWide, OperandType::kIdx16)                               \
-  V(JumpIfFalse, OperandType::kImm8)                                           \
-  V(JumpIfFalseConstant, OperandType::kIdx8)                                   \
-  V(JumpIfFalseConstantWide, OperandType::kIdx16)                              \
-  V(JumpIfToBooleanTrue, OperandType::kImm8)                                   \
-  V(JumpIfToBooleanTrueConstant, OperandType::kIdx8)                           \
-  V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16)                      \
-  V(JumpIfToBooleanFalse, OperandType::kImm8)                                  \
-  V(JumpIfToBooleanFalseConstant, OperandType::kIdx8)                          \
-  V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16)                     \
-  V(JumpIfNull, OperandType::kImm8)                                            \
-  V(JumpIfNullConstant, OperandType::kIdx8)                                    \
-  V(JumpIfNullConstantWide, OperandType::kIdx16)                               \
-  V(JumpIfUndefined, OperandType::kImm8)                                       \
-  V(JumpIfUndefinedConstant, OperandType::kIdx8)                               \
-  V(JumpIfUndefinedConstantWide, OperandType::kIdx16)                          \
-  V(JumpIfNotHole, OperandType::kImm8)                                         \
-  V(JumpIfNotHoleConstant, OperandType::kIdx8)                                 \
-  V(JumpIfNotHoleConstantWide, OperandType::kIdx16)                            \
-                                                                               \
-  /* Complex flow control For..in */                                           \
-  V(ForInPrepare, OperandType::kRegOutTriple8)                                 \
-  V(ForInPrepareWide, OperandType::kRegOutTriple16)                            \
-  V(ForInDone, OperandType::kReg8, OperandType::kReg8)                         \
-  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
-  V(ForInNextWide, OperandType::kReg16, OperandType::kReg16,                   \
-    OperandType::kRegPair16)                                                   \
-  V(ForInStep, OperandType::kReg8)                                             \
-                                                                               \
-  /* Perform a stack guard check */                                            \
-  V(StackCheck, OperandType::kNone)                                            \
-                                                                               \
-  /* Non-local flow control */                                                 \
-  V(Throw, OperandType::kNone)                                                 \
-  V(ReThrow, OperandType::kNone)                                               \
-  V(Return, OperandType::kNone)                                                \
-                                                                               \
-  /* Debugger */                                                               \
-  V(Debugger, OperandType::kNone)                                              \
-  DEBUG_BREAK_BYTECODE_LIST(V)
+#define BYTECODE_LIST(V)                                                      \
+  /* Extended width operands */                                               \
+  V(Wide, AccumulatorUse::kNone)                                              \
+  V(ExtraWide, AccumulatorUse::kNone)                                         \
+                                                                              \
+  /* Loading the accumulator */                                               \
+  V(LdaZero, AccumulatorUse::kWrite)                                          \
+  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                        \
+  V(LdaUndefined, AccumulatorUse::kWrite)                                     \
+  V(LdaNull, AccumulatorUse::kWrite)                                          \
+  V(LdaTheHole, AccumulatorUse::kWrite)                                       \
+  V(LdaTrue, AccumulatorUse::kWrite)                                          \
+  V(LdaFalse, AccumulatorUse::kWrite)                                         \
+  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                   \
+                                                                              \
+  /* Globals */                                                               \
+  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx)  \
+  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx,         \
+    OperandType::kIdx)                                                        \
+  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                \
+    OperandType::kIdx)                                                        \
+  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* Context operations */                                                    \
+  V(PushContext, AccumulatorUse::kRead, OperandType::kReg)                    \
+  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                     \
+  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                \
+    OperandType::kIdx)                                                        \
+  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                 \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* Load-Store lookup slots */                                               \
+  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                 \
+  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)     \
+  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
+  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
+                                                                              \
+  /* Register-accumulator transfers */                                        \
+  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                          \
+  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                        \
+                                                                              \
+  /* Register-register transfers */                                           \
+  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)      \
+                                                                              \
+  /* LoadIC operations */                                                     \
+  V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx,     \
+    OperandType::kIdx)                                                        \
+  V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg,               \
+    OperandType::kIdx)                                                        \
+                                                                              \
+  /* StoreIC operations */                                                    \
+  V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kIdx)                                     \
+  V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kIdx)                                     \
+  V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,             \
+    OperandType::kReg, OperandType::kIdx)                                     \
+  V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg,             \
+    OperandType::kReg, OperandType::kIdx)                                     \
+                                                                              \
+  /* Binary Operators */                                                      \
+  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
+  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                \
+  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)         \
+                                                                              \
+  /* Unary Operators */                                                       \
+  V(Inc, AccumulatorUse::kReadWrite)                                          \
+  V(Dec, AccumulatorUse::kReadWrite)                                          \
+  V(LogicalNot, AccumulatorUse::kReadWrite)                                   \
+  V(TypeOf, AccumulatorUse::kReadWrite)                                       \
+  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)      \
+  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)      \
+                                                                              \
+  /* Call operations */                                                       \
+  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,       \
+    OperandType::kRegCount, OperandType::kIdx)                                \
+  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
+    OperandType::kRegCount, OperandType::kIdx)                                \
+  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,             \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,       \
+    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
+  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                 \
+    OperandType::kReg, OperandType::kRegCount)                                \
+                                                                              \
+  /* Intrinsics */                                                            \
+  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId,         \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+                                                                              \
+  /* New operator */                                                          \
+  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                       \
+    OperandType::kMaybeReg, OperandType::kRegCount)                           \
+                                                                              \
+  /* Test Operators */                                                        \
+  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)              \
+  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)           \
+  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)              \
+  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)           \
+  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)    \
+  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                    \
+                                                                              \
+  /* Cast operators */                                                        \
+  V(ToName, AccumulatorUse::kReadWrite)                                       \
+  V(ToNumber, AccumulatorUse::kReadWrite)                                     \
+  V(ToObject, AccumulatorUse::kReadWrite)                                     \
+                                                                              \
+  /* Literals */                                                              \
+  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
+    OperandType::kIdx, OperandType::kFlag8)                                   \
+                                                                              \
+  /* Closure allocation */                                                    \
+  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                 \
+    OperandType::kFlag8)                                                      \
+                                                                              \
+  /* Arguments allocation */                                                  \
+  V(CreateMappedArguments, AccumulatorUse::kWrite)                            \
+  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                          \
+  V(CreateRestParameter, AccumulatorUse::kWrite)                              \
+                                                                              \
+  /* Control Flow */                                                          \
+  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                           \
+  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                   \
+  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                    \
+  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)            \
+  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)            \
+  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
+  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)           \
+  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)   \
+  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                \
+  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
+  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                  \
+  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)          \
+                                                                              \
+  /* Complex flow control For..in */                                          \
+  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)          \
+  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)  \
+  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,  \
+    OperandType::kRegPair, OperandType::kIdx)                                 \
+  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                     \
+                                                                              \
+  /* Perform a stack guard check */                                           \
+  V(StackCheck, AccumulatorUse::kNone)                                        \
+                                                                              \
+  /* Non-local flow control */                                                \
+  V(Throw, AccumulatorUse::kRead)                                             \
+  V(ReThrow, AccumulatorUse::kRead)                                           \
+  V(Return, AccumulatorUse::kNone)                                            \
+                                                                              \
+  /* Debugger */                                                              \
+  V(Debugger, AccumulatorUse::kNone)                                          \
+  DEBUG_BREAK_BYTECODE_LIST(V)                                                \
+                                                                              \
+  /* Illegal bytecode (terminates execution) */                               \
+  V(Illegal, AccumulatorUse::kNone)
 
-// Enumeration of the size classes of operand types used by bytecodes.
+enum class AccumulatorUse : uint8_t {
+  kNone = 0,
+  kRead = 1 << 0,
+  kWrite = 1 << 1,
+  kReadWrite = kRead | kWrite
+};
+
+V8_INLINE AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) & static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
+V8_INLINE AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+  int result = static_cast<int>(lhs) | static_cast<int>(rhs);
+  return static_cast<AccumulatorUse>(result);
+}
+
+// Enumeration of scaling factors applicable to scalable operands. Code
+// relies on being able to cast values to integer scaling values.
+enum class OperandScale : uint8_t {
+  kSingle = 1,
+  kDouble = 2,
+  kQuadruple = 4,
+  kMaxValid = kQuadruple,
+  kInvalid = 8,
+};
+
+// Enumeration of the size classes of operand types used by
+// bytecodes. Code relies on being able to cast values to integer
+// types to get the size in bytes.
 enum class OperandSize : uint8_t {
   kNone = 0,
   kByte = 1,
   kShort = 2,
+  kQuad = 4,
+  kLast = kQuad
 };
 
+// Primitive operand info that summarizes properties of operands.
+// Columns are Name, IsScalable, IsUnsigned, UnscaledSize.
+#define OPERAND_TYPE_INFO_LIST(V)                         \
+  V(None, false, false, OperandSize::kNone)               \
+  V(ScalableSignedByte, true, false, OperandSize::kByte)  \
+  V(ScalableUnsignedByte, true, true, OperandSize::kByte) \
+  V(FixedUnsignedByte, false, true, OperandSize::kByte)   \
+  V(FixedUnsignedShort, false, true, OperandSize::kShort)
+
+enum class OperandTypeInfo : uint8_t {
+#define DECLARE_OPERAND_TYPE_INFO(Name, ...) k##Name,
+  OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
+#undef DECLARE_OPERAND_TYPE_INFO
+};
 
 // Enumeration of operand types used by bytecodes.
 enum class OperandType : uint8_t {
@@ -330,9 +340,6 @@
 
   static Register FromParameterIndex(int index, int parameter_count);
   int ToParameterIndex(int parameter_count) const;
-  static int MaxParameterIndex();
-  static int MaxRegisterIndex();
-  static int MaxRegisterIndexForByteOperand();
 
   // Returns an invalid register.
   static Register invalid_value() { return Register(); }
@@ -349,14 +356,8 @@
   static Register new_target();
   bool is_new_target() const;
 
-  static Register FromOperand(uint8_t operand);
-  uint8_t ToOperand() const;
-
-  static Register FromWideOperand(uint16_t operand);
-  uint16_t ToWideOperand() const;
-
-  static Register FromRawOperand(uint32_t raw_operand);
-  uint32_t ToRawOperand() const;
+  int32_t ToOperand() const { return -index_; }
+  static Register FromOperand(int32_t operand) { return Register(-operand); }
 
   static bool AreContiguous(Register reg1, Register reg2,
                             Register reg3 = Register(),
@@ -399,9 +400,18 @@
   // Returns string representation of |bytecode|.
   static const char* ToString(Bytecode bytecode);
 
+  // Returns string representation of |bytecode| at the given |operand_scale|.
+  static std::string ToString(Bytecode bytecode, OperandScale operand_scale);
+
+  // Returns string representation of |accumulator_use|.
+  static const char* AccumulatorUseToString(AccumulatorUse accumulator_use);
+
   // Returns string representation of |operand_type|.
   static const char* OperandTypeToString(OperandType operand_type);
 
+  // Returns string representation of |operand_scale|.
+  static const char* OperandScaleToString(OperandScale operand_scale);
+
   // Returns string representation of |operand_size|.
   static const char* OperandSizeToString(OperandSize operand_size);
 
@@ -417,57 +427,72 @@
   // Returns the number of register operands expected by |bytecode|.
   static int NumberOfRegisterOperands(Bytecode bytecode);
 
+  // Returns the prefix bytecode representing an operand scale to be
+  // applied to a bytecode.
+  static Bytecode OperandScaleToPrefixBytecode(OperandScale operand_scale);
+
+  // Returns true if the operand scale requires a prefix bytecode.
+  static bool OperandScaleRequiresPrefixBytecode(OperandScale operand_scale);
+
+  // Returns the scaling applied to scalable operands if bytecode is
+  // a scaling prefix.
+  static OperandScale PrefixBytecodeToOperandScale(Bytecode bytecode);
+
+  // Returns how accumulator is used by |bytecode|.
+  static AccumulatorUse GetAccumulatorUse(Bytecode bytecode);
+
+  // Returns true if |bytecode| reads the accumulator.
+  static bool ReadsAccumulator(Bytecode bytecode);
+
+  // Returns true if |bytecode| writes the accumulator.
+  static bool WritesAccumulator(Bytecode bytecode);
+
   // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
   // Returns the size of the i-th operand of |bytecode|.
-  static OperandSize GetOperandSize(Bytecode bytecode, int i);
+  static OperandSize GetOperandSize(Bytecode bytecode, int i,
+                                    OperandScale operand_scale);
 
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
-  static int GetOperandOffset(Bytecode bytecode, int i);
+  static int GetOperandOffset(Bytecode bytecode, int i,
+                              OperandScale operand_scale);
 
   // Returns a zero-based bitmap of the register operand positions of
   // |bytecode|.
   static int GetRegisterOperandBitmap(Bytecode bytecode);
 
-  // Returns a debug break bytecode with a matching operand size.
+  // Returns a debug break bytecode to replace |bytecode|.
   static Bytecode GetDebugBreak(Bytecode bytecode);
 
-  // Returns the size of the bytecode including its operands.
-  static int Size(Bytecode bytecode);
+  // Returns the size of the bytecode including its operands for the
+  // given |operand_scale|.
+  static int Size(Bytecode bytecode, OperandScale operand_scale);
 
   // Returns the size of |operand|.
-  static OperandSize SizeOfOperand(OperandType operand);
+  static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
 
   // Returns true if the bytecode is a conditional jump taking
-  // an immediate byte operand (OperandType::kImm8).
+  // an immediate byte operand (OperandType::kImm).
   static bool IsConditionalJumpImmediate(Bytecode bytecode);
 
   // Returns true if the bytecode is a conditional jump taking
-  // a constant pool entry (OperandType::kIdx8).
+  // a constant pool entry (OperandType::kIdx).
   static bool IsConditionalJumpConstant(Bytecode bytecode);
 
   // Returns true if the bytecode is a conditional jump taking
-  // a constant pool entry (OperandType::kIdx16).
-  static bool IsConditionalJumpConstantWide(Bytecode bytecode);
-
-  // Returns true if the bytecode is a conditional jump taking
   // any kind of operand.
   static bool IsConditionalJump(Bytecode bytecode);
 
   // Returns true if the bytecode is a jump or a conditional jump taking
-  // an immediate byte operand (OperandType::kImm8).
+  // an immediate byte operand (OperandType::kImm).
   static bool IsJumpImmediate(Bytecode bytecode);
 
   // Returns true if the bytecode is a jump or conditional jump taking a
-  // constant pool entry (OperandType::kIdx8).
+  // constant pool entry (OperandType::kIdx).
   static bool IsJumpConstant(Bytecode bytecode);
 
-  // Returns true if the bytecode is a jump or conditional jump taking a
-  // constant pool entry (OperandType::kIdx16).
-  static bool IsJumpConstantWide(Bytecode bytecode);
-
   // Returns true if the bytecode is a jump or conditional jump taking
   // any kind of operand.
   static bool IsJump(Bytecode bytecode);
@@ -478,18 +503,17 @@
   // Returns true if the bytecode is a call or a constructor call.
   static bool IsCallOrNew(Bytecode bytecode);
 
+  // Returns true if the bytecode is a call to the runtime.
+  static bool IsCallRuntime(Bytecode bytecode);
+
   // Returns true if the bytecode is a debug break.
   static bool IsDebugBreak(Bytecode bytecode);
 
-  // Returns true if |operand_type| is a register index operand (kIdx8/kIdx16).
-  static bool IsIndexOperandType(OperandType operand_type);
+  // Returns true if the bytecode has wider operand forms.
+  static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
 
-  // Returns true if |operand_type| represents an immediate.
-  static bool IsImmediateOperandType(OperandType operand_type);
-
-  // Returns true if |operand_type| is a register count operand
-  // (kRegCount8/kRegCount16).
-  static bool IsRegisterCountOperandType(OperandType operand_type);
+  // Returns true if the bytecode is a scaling prefix bytecode.
+  static bool IsPrefixScalingBytecode(Bytecode bytecode);
 
   // Returns true if |operand_type| is any type of register operand.
   static bool IsRegisterOperandType(OperandType operand_type);
@@ -501,20 +525,52 @@
   static bool IsRegisterOutputOperandType(OperandType operand_type);
 
   // Returns true if |operand_type| is a maybe register operand
-  // (kMaybeReg8/kMaybeReg16).
+  // (kMaybeReg).
   static bool IsMaybeRegisterOperandType(OperandType operand_type);
 
+  // Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
+  static bool IsRuntimeIdOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is unsigned, false if signed.
+  static bool IsUnsignedOperandType(OperandType operand_type);
+
+  // Decodes a register operand in a byte array.
+  static Register DecodeRegisterOperand(const uint8_t* operand_start,
+                                        OperandType operand_type,
+                                        OperandScale operand_scale);
+
+  // Decodes a signed operand in a byte array.
+  static int32_t DecodeSignedOperand(const uint8_t* operand_start,
+                                     OperandType operand_type,
+                                     OperandScale operand_scale);
+
+  // Decodes an unsigned operand in a byte array.
+  static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
+                                        OperandType operand_type,
+                                        OperandScale operand_scale);
+
   // Decode a single bytecode and operands to |os|.
   static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
                               int number_of_parameters);
 
+  // Returns true if a handler is generated for a bytecode at a given
+  // operand scale. All bytecodes have handlers at OperandScale::kSingle,
+  // but only bytecodes with scalable operands have handlers with larger
+  // OperandScale values.
+  static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
+
+  // Return the next larger operand scale.
+  static OperandScale NextOperandScale(OperandScale operand_scale);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
 };
 
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
+std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
 std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_type);
 
 }  // namespace interpreter
 }  // namespace internal
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index e8b1281..7ce50b5 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -11,28 +11,25 @@
 namespace internal {
 namespace interpreter {
 
-ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
-                                                             size_t start_index,
-                                                             size_t capacity)
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(
+    Zone* zone, size_t start_index, size_t capacity, OperandSize operand_size)
     : start_index_(start_index),
       capacity_(capacity),
       reserved_(0),
+      operand_size_(operand_size),
       constants_(zone) {}
 
-
 void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
   DCHECK_GT(available(), 0u);
   reserved_++;
   DCHECK_LE(reserved_, capacity() - constants_.size());
 }
 
-
 void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
   DCHECK_GT(reserved_, 0u);
   reserved_--;
 }
 
-
 size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
     Handle<Object> object) {
   DCHECK_GT(available(), 0u);
@@ -42,45 +39,57 @@
   return index + start_index();
 }
 
-
 Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
     size_t index) const {
+  DCHECK_GE(index, start_index());
+  DCHECK_LT(index, start_index() + size());
   return constants_[index - start_index()];
 }
 
-
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
-STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
-
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+    ConstantArrayBuilder::k16BitCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+    ConstantArrayBuilder::k32BitCapacity;
 
 ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
-    : isolate_(isolate),
-      idx8_slice_(zone, 0, kLowCapacity),
-      idx16_slice_(zone, kLowCapacity, kHighCapacity),
-      constants_map_(isolate->heap(), zone) {
-  STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
-  DCHECK_EQ(idx8_slice_.start_index(), 0u);
-  DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
-  DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
-  DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+    : isolate_(isolate), constants_map_(isolate->heap(), zone) {
+  idx_slice_[0] =
+      new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
+  idx_slice_[1] = new (zone) ConstantArraySlice(
+      zone, k8BitCapacity, k16BitCapacity, OperandSize::kShort);
+  idx_slice_[2] = new (zone) ConstantArraySlice(
+      zone, k8BitCapacity + k16BitCapacity, k32BitCapacity, OperandSize::kQuad);
 }
 
-
 size_t ConstantArrayBuilder::size() const {
-  if (idx16_slice_.size() > 0) {
-    return idx16_slice_.start_index() + idx16_slice_.size();
-  } else {
-    return idx8_slice_.size();
+  size_t i = arraysize(idx_slice_);
+  while (i > 0) {
+    ConstantArraySlice* slice = idx_slice_[--i];
+    if (slice->size() > 0) {
+      return slice->start_index() + slice->size();
+    }
   }
+  return idx_slice_[0]->size();
 }
 
+const ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::IndexToSlice(size_t index) const {
+  for (const ConstantArraySlice* slice : idx_slice_) {
+    if (index <= slice->max_index()) {
+      return slice;
+    }
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 Handle<Object> ConstantArrayBuilder::At(size_t index) const {
-  if (index >= idx16_slice_.start_index()) {
-    return idx16_slice_.At(index);
-  } else if (index < idx8_slice_.size()) {
-    return idx8_slice_.At(index);
+  const ConstantArraySlice* slice = IndexToSlice(index);
+  if (index < slice->start_index() + slice->size()) {
+    return slice->At(index);
   } else {
+    DCHECK_LT(index, slice->capacity());
     return isolate_->factory()->the_hole_value();
   }
 }
@@ -88,49 +97,82 @@
 Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
   Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
       static_cast<int>(size()), PretenureFlag::TENURED);
-  for (int i = 0; i < fixed_array->length(); i++) {
-    fixed_array->set(i, *At(static_cast<size_t>(i)));
+  int array_index = 0;
+  for (const ConstantArraySlice* slice : idx_slice_) {
+    if (array_index == fixed_array->length()) {
+      break;
+    }
+    DCHECK(array_index == 0 ||
+           base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+    // Copy objects from slice into array.
+    for (size_t i = 0; i < slice->size(); ++i) {
+      fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
+    }
+    // Insert holes where reservations led to unused slots.
+    size_t padding =
+        std::min(static_cast<size_t>(fixed_array->length() - array_index),
+                 slice->capacity() - slice->size());
+    for (size_t i = 0; i < padding; i++) {
+      fixed_array->set(array_index++, *isolate_->factory()->the_hole_value());
+    }
   }
+  DCHECK_EQ(array_index, fixed_array->length());
   constants_map()->Clear();
   return fixed_array;
 }
 
-
 size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
   index_t* entry = constants_map()->Find(object);
   return (entry == nullptr) ? AllocateEntry(object) : *entry;
 }
 
-
 ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
     Handle<Object> object) {
   DCHECK(!object->IsOddball());
-  size_t index;
   index_t* entry = constants_map()->Get(object);
-  if (idx8_slice_.available() > 0) {
-    index = idx8_slice_.Allocate(object);
-  } else {
-    index = idx16_slice_.Allocate(object);
+  for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+    if (idx_slice_[i]->available() > 0) {
+      size_t index = idx_slice_[i]->Allocate(object);
+      *entry = static_cast<index_t>(index);
+      return *entry;
+      break;
+    }
   }
-  CHECK_LT(index, kMaxCapacity);
-  *entry = static_cast<index_t>(index);
-  return *entry;
+  UNREACHABLE();
+  return kMaxUInt32;
 }
 
-
 OperandSize ConstantArrayBuilder::CreateReservedEntry() {
-  if (idx8_slice_.available() > 0) {
-    idx8_slice_.Reserve();
-    return OperandSize::kByte;
-  } else if (idx16_slice_.available() > 0) {
-    idx16_slice_.Reserve();
-    return OperandSize::kShort;
-  } else {
-    UNREACHABLE();
-    return OperandSize::kNone;
+  for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+    if (idx_slice_[i]->available() > 0) {
+      idx_slice_[i]->Reserve();
+      return idx_slice_[i]->operand_size();
+    }
   }
+  UNREACHABLE();
+  return OperandSize::kNone;
 }
 
+ConstantArrayBuilder::ConstantArraySlice*
+ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
+  ConstantArraySlice* slice = nullptr;
+  switch (operand_size) {
+    case OperandSize::kNone:
+      UNREACHABLE();
+      break;
+    case OperandSize::kByte:
+      slice = idx_slice_[0];
+      break;
+    case OperandSize::kShort:
+      slice = idx_slice_[1];
+      break;
+    case OperandSize::kQuad:
+      slice = idx_slice_[2];
+      break;
+  }
+  DCHECK(slice->operand_size() == operand_size);
+  return slice;
+}
 
 size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
                                                  Handle<Object> object) {
@@ -140,33 +182,20 @@
   if (nullptr == entry) {
     index = AllocateEntry(object);
   } else {
-    if (operand_size == OperandSize::kByte &&
-        *entry >= idx8_slice_.capacity()) {
-      // The object is already in the constant array, but has an index
-      // outside the range of an idx8 operand so we need to create a
-      // duplicate entry in the idx8 operand range to satisfy the
-      // commitment.
-      *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+    ConstantArraySlice* slice = OperandSizeToSlice(operand_size);
+    if (*entry > slice->max_index()) {
+      // The object is already in the constant array, but may have an
+      // index too big for the reserved operand_size. So, duplicate the
+      // entry with the smaller operand size.
+      *entry = static_cast<index_t>(slice->Allocate(object));
     }
     index = *entry;
   }
-  DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
-  DCHECK_LT(index, kMaxCapacity);
   return index;
 }
 
-
 void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
-  switch (operand_size) {
-    case OperandSize::kByte:
-      idx8_slice_.Unreserve();
-      return;
-    case OperandSize::kShort:
-      idx16_slice_.Unreserve();
-      return;
-    default:
-      UNREACHABLE();
-  }
+  OperandSizeToSlice(operand_size)->Unreserve();
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index d7e41e3..1a68646 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -23,13 +23,14 @@
 class ConstantArrayBuilder final BASE_EMBEDDED {
  public:
   // Capacity of the 8-bit operand slice.
-  static const size_t kLowCapacity = 1u << kBitsPerByte;
-
-  // Capacity of the combined 8-bit and 16-bit operand slices.
-  static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+  static const size_t k8BitCapacity = 1u << kBitsPerByte;
 
   // Capacity of the 16-bit operand slice.
-  static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+  static const size_t k16BitCapacity = (1u << 2 * kBitsPerByte) - k8BitCapacity;
+
+  // Capacity of the 32-bit operand slice.
+  static const size_t k32BitCapacity =
+      kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
 
   ConstantArrayBuilder(Isolate* isolate, Zone* zone);
 
@@ -60,12 +61,13 @@
   void DiscardReservedEntry(OperandSize operand_size);
 
  private:
-  typedef uint16_t index_t;
+  typedef uint32_t index_t;
 
   index_t AllocateEntry(Handle<Object> object);
 
-  struct ConstantArraySlice final {
-    ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+  struct ConstantArraySlice final : public ZoneObject {
+    ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
+                       OperandSize operand_size);
     void Reserve();
     void Unreserve();
     size_t Allocate(Handle<Object> object);
@@ -76,21 +78,26 @@
     inline size_t capacity() const { return capacity_; }
     inline size_t size() const { return constants_.size(); }
     inline size_t start_index() const { return start_index_; }
+    inline size_t max_index() const { return start_index_ + capacity() - 1; }
+    inline OperandSize operand_size() const { return operand_size_; }
 
    private:
     const size_t start_index_;
     const size_t capacity_;
     size_t reserved_;
+    OperandSize operand_size_;
     ZoneVector<Handle<Object>> constants_;
 
     DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
   };
 
+  const ConstantArraySlice* IndexToSlice(size_t index) const;
+  ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
+
   IdentityMap<index_t>* constants_map() { return &constants_map_; }
 
   Isolate* isolate_;
-  ConstantArraySlice idx8_slice_;
-  ConstantArraySlice idx16_slice_;
+  ConstantArraySlice* idx_slice_[3];
   IdentityMap<index_t> constants_map_;
 };
 
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 440e879..2663e4a 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -22,12 +22,16 @@
 using compiler::Node;
 
 InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
-                                           Bytecode bytecode)
-    : compiler::CodeStubAssembler(
-          isolate, zone, InterpreterDispatchDescriptor(isolate),
-          Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+                                           Bytecode bytecode,
+                                           OperandScale operand_scale)
+    : compiler::CodeStubAssembler(isolate, zone,
+                                  InterpreterDispatchDescriptor(isolate),
+                                  Code::ComputeFlags(Code::BYTECODE_HANDLER),
+                                  Bytecodes::ToString(bytecode), 0),
       bytecode_(bytecode),
+      operand_scale_(operand_scale),
       accumulator_(this, MachineRepresentation::kTagged),
+      accumulator_use_(AccumulatorUse::kNone),
       context_(this, MachineRepresentation::kTagged),
       bytecode_array_(this, MachineRepresentation::kTagged),
       disable_stack_check_across_call_(false),
@@ -42,11 +46,26 @@
   }
 }
 
-InterpreterAssembler::~InterpreterAssembler() {}
+InterpreterAssembler::~InterpreterAssembler() {
+  // If the following check fails the handler does not use the
+  // accumulator in the way described in the bytecode definitions in
+  // bytecodes.h.
+  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+}
 
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+  return accumulator_.value();
+}
+
+Node* InterpreterAssembler::GetAccumulator() {
+  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
+  return GetAccumulatorUnchecked();
+}
 
 void InterpreterAssembler::SetAccumulator(Node* value) {
+  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
   accumulator_.Bind(value);
 }
 
@@ -79,11 +98,11 @@
 
 Node* InterpreterAssembler::LoadRegister(int offset) {
   return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
-              Int32Constant(offset));
+              IntPtrConstant(offset));
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+  return LoadRegister(IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -97,12 +116,12 @@
 
 Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(), Int32Constant(offset),
+                             RegisterFileRawPointer(), IntPtrConstant(offset),
                              value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
-  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+  return StoreRegister(value, IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -113,27 +132,31 @@
 
 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
+  return IntPtrAdd(reg_index, IntPtrConstant(-1));
 }
 
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  return Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+Node* InterpreterAssembler::OperandOffset(int operand_index) {
+  return IntPtrConstant(
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  Node* load = Load(
-      MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+              IntPtrAdd(BytecodeOffset(), operand_offset));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+                    IntPtrAdd(BytecodeOffset(), operand_offset));
+
   // Ensure that we sign extend to full pointer size
   if (kPointerSize == 8) {
     load = ChangeInt32ToInt64(load);
@@ -141,58 +164,85 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  if (TargetSupportsUnalignedAccess()) {
-    return Load(
-        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                        bytecode_, operand_index))));
-  } else {
-    int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
-    Node* second_byte =
-        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+    int relative_offset, MachineType result_type) {
+  static const int kMaxCount = 4;
+  DCHECK(!TargetSupportsUnalignedAccess());
+
+  int count;
+  switch (result_type.representation()) {
+    case MachineRepresentation::kWord16:
+      count = 2;
+      break;
+    case MachineRepresentation::kWord32:
+      count = 4;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  MachineType msb_type =
+      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
+
 #if V8_TARGET_LITTLE_ENDIAN
-    return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+  const int kStep = -1;
+  int msb_offset = count - 1;
 #elif V8_TARGET_BIG_ENDIAN
-    return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+  const int kStep = 1;
+  int msb_offset = 0;
 #else
 #error "Unknown Architecture"
 #endif
+
+  // Read the most significant byte into bytes[0] and then in order
+  // down to least significant in bytes[count - 1].
+  DCHECK(count <= kMaxCount);
+  compiler::Node* bytes[kMaxCount];
+  for (int i = 0; i < count; i++) {
+    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
+    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
+    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
+    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
+  }
+
+  // Pack LSB to MSB.
+  Node* result = bytes[--count];
+  for (int i = 1; --count >= 0; i++) {
+    Node* shift = Int32Constant(i * kBitsPerByte);
+    Node* value = Word32Shl(bytes[count], shift);
+    result = Word32Or(value, result);
+  }
+  return result;
+}
+
+Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
-    int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   Node* load;
   if (TargetSupportsUnalignedAccess()) {
     load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
-#if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
-    Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), hi_byte_offset));
-    Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), lo_byte_offset));
-    hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
-    load = Word32Or(hi_byte, lo_byte);
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
   }
 
   // Ensure that we sign extend to full pointer size
@@ -202,57 +252,123 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+  }
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  Node* load;
+  if (TargetSupportsUnalignedAccess()) {
+    load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+  }
+
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+                                                  OperandSize operand_size) {
+  DCHECK(!Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
     case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kRegCount8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
+      return BytecodeOperandSignedByte(operand_index);
     case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kRegCount16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
+      return BytecodeOperandSignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandSignedQuad(operand_index);
     case OperandSize::kNone:
       UNREACHABLE();
   }
   return nullptr;
 }
 
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+                                                    OperandSize operand_size) {
+  DCHECK(Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
+    case OperandSize::kByte:
+      return BytecodeOperandUnsignedByte(operand_index);
+    case OperandSize::kShort:
+      return BytecodeOperandUnsignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandUnsignedQuad(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+  DCHECK_EQ(OperandType::kRegCount,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+  DCHECK_EQ(OperandType::kFlag8,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kByte);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
-  DCHECK_EQ(OperandType::kImm8,
+  DCHECK_EQ(OperandType::kImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
-  return BytecodeOperandSignExtended(operand_index);
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kIdx8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kIdx16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return nullptr;
+  DCHECK(OperandType::kIdx ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(bytecode_, operand_index);
-  if (Bytecodes::IsRegisterOperandType(operand_type)) {
-    OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
-    if (operand_size == OperandSize::kByte) {
-      return BytecodeOperandSignExtended(operand_index);
-    } else if (operand_size == OperandSize::kShort) {
-      return BytecodeOperandShortSignExtended(operand_index);
-    }
-  }
-  UNREACHABLE();
-  return nullptr;
+  DCHECK(Bytecodes::IsRegisterOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+  DCHECK(OperandType::kRuntimeId ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kShort);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
@@ -264,14 +380,6 @@
   return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
 }
 
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
-}
-
 Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
   return Load(MachineType::AnyTagged(), object,
               IntPtrConstant(offset - kHeapObjectTag));
@@ -285,7 +393,7 @@
 Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Load(MachineType::AnyTagged(), context, offset);
 }
 
@@ -293,7 +401,7 @@
                                              Node* value) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Store(MachineRepresentation::kTagged, context, offset, value);
 }
 
@@ -311,8 +419,6 @@
 void InterpreterAssembler::CallPrologue() {
   StoreRegister(SmiTag(BytecodeOffset()),
                 InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-  StoreRegister(BytecodeArrayTaggedPointer(),
-                InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
 
   if (FLAG_debug_code && !disable_stack_check_across_call_) {
     DCHECK(stack_pointer_before_call_ == nullptr);
@@ -368,7 +474,7 @@
   Node* function = IntPtrAdd(function_table, function_offset);
   Node* function_entry =
       Load(MachineType::Pointer(), function,
-           Int32Constant(offsetof(Runtime::Function, entry)));
+           IntPtrConstant(offsetof(Runtime::Function, entry)));
 
   return CallStub(callable.descriptor(), code_target, context, arg_count,
                   first_arg, function_entry, result_size);
@@ -405,7 +511,7 @@
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+  return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
 }
 
 Node* InterpreterAssembler::Advance(Node* delta) {
@@ -438,18 +544,21 @@
 }
 
 void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+  DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
 }
 
 void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
   Node* target_bytecode = Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+  if (kPointerSize == 8) {
+    target_bytecode = ChangeUint32ToUint64(target_bytecode);
+  }
 
   // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
   // from code object on every dispatch.
   Node* target_code_object =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
-           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
 
   DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
 }
@@ -461,12 +570,46 @@
   }
 
   InterpreterDispatchDescriptor descriptor(isolate());
-  Node* args[] = {GetAccumulator(),          RegisterFileRawPointer(),
+  Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
                   bytecode_offset,           BytecodeArrayTaggedPointer(),
                   DispatchTableRawPointer(), GetContext()};
   TailCall(descriptor, handler, args, 0);
 }
 
+void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
+  // Dispatching a wide bytecode requires treating the prefix
+  // bytecode a base pointer into the dispatch table and dispatching
+  // the bytecode that follows relative to this base.
+  //
+  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
+  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
+  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
+  Node* next_bytecode_offset = Advance(1);
+  Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                             next_bytecode_offset);
+  if (kPointerSize == 8) {
+    next_bytecode = ChangeUint32ToUint64(next_bytecode);
+  }
+  Node* base_index;
+  switch (operand_scale) {
+    case OperandScale::kDouble:
+      base_index = IntPtrConstant(1 << kBitsPerByte);
+      break;
+    case OperandScale::kQuadruple:
+      base_index = IntPtrConstant(2 << kBitsPerByte);
+      break;
+    default:
+      UNREACHABLE();
+      base_index = nullptr;
+  }
+  Node* target_index = IntPtrAdd(base_index, next_bytecode);
+  Node* target_code_object =
+      Load(MachineType::Pointer(), DispatchTableRawPointer(),
+           WordShl(target_index, kPointerSizeLog2));
+
+  DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+}
+
 void InterpreterAssembler::InterpreterReturn() {
   // TODO(rmcilroy): Investigate whether it is worth supporting self
   // optimization of primitive functions like FullCodegen.
@@ -505,27 +648,29 @@
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
   disable_stack_check_across_call_ = true;
   Node* abort_id = SmiTag(Int32Constant(bailout_reason));
-  Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
   disable_stack_check_across_call_ = false;
-  // Unreached, but keeps turbofan happy.
-  Return(ret_value);
 }
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
   CodeStubAssembler::Label match(this);
   CodeStubAssembler::Label no_match(this);
+  CodeStubAssembler::Label end(this);
 
   Node* condition = WordEqual(lhs, rhs);
   Branch(condition, &match, &no_match);
   Bind(&no_match);
   Abort(bailout_reason);
+  Goto(&end);
   Bind(&match);
+  Goto(&end);
+  Bind(&end);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
-              SmiTag(BytecodeOffset()), GetAccumulator());
+              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
 }
 
 // static
@@ -534,7 +679,8 @@
   return false;
 #elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
   return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
+    V8_TARGET_ARCH_S390
   return true;
 #else
 #error "Unknown Architecture"
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 9600dfb..86ecea5 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -19,12 +19,16 @@
 
 class InterpreterAssembler : public compiler::CodeStubAssembler {
  public:
-  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+                       OperandScale operand_scale);
   virtual ~InterpreterAssembler();
 
   // Returns the count immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandCount(int operand_index);
+  // Returns the 8-bit flag for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandFlag(int operand_index);
   // Returns the index immediate for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandIdx(int operand_index);
@@ -34,6 +38,9 @@
   // Returns the register index for bytecode operand |operand_index| in the
   // current bytecode.
   compiler::Node* BytecodeOperandReg(int operand_index);
+  // Returns the runtime id immediate for bytecode operand
+  // |operand_index| in the current bytecode.
+  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
 
   // Accumulator.
   compiler::Node* GetAccumulator();
@@ -62,9 +69,6 @@
   // Load constant at |index| in the constant pool.
   compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
 
-  // Load an element from a fixed array on the heap.
-  compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
-
   // Load a field from an object on the heap.
   compiler::Node* LoadObjectField(compiler::Node* object, int offset);
 
@@ -139,10 +143,14 @@
     DispatchToBytecodeHandler(handler, BytecodeOffset());
   }
 
+  // Dispatch bytecode as wide operand variant.
+  void DispatchWide(OperandScale operand_scale);
+
   // Abort with the given bailout reason.
   void Abort(BailoutReason bailout_reason);
 
  protected:
+  Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
 
  private:
@@ -155,6 +163,11 @@
   // Returns a raw pointer to first entry in the interpreter dispatch table.
   compiler::Node* DispatchTableRawPointer();
 
+  // Returns the accumulator value without checking whether bytecode
+  // uses it. This is intended to be used only in dispatch and in
+  // tracing as these need to bypass accumulator use validity checks.
+  compiler::Node* GetAccumulatorUnchecked();
+
   // Saves and restores interpreter bytecode offset to the interpreter stack
   // frame when performing a call.
   void CallPrologue() override;
@@ -170,10 +183,28 @@
   // Returns the offset of register |index| relative to RegisterFilePointer().
   compiler::Node* RegisterFrameOffset(compiler::Node* index);
 
-  compiler::Node* BytecodeOperand(int operand_index);
-  compiler::Node* BytecodeOperandSignExtended(int operand_index);
-  compiler::Node* BytecodeOperandShort(int operand_index);
-  compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+  // Returns the offset of an operand relative to the current bytecode offset.
+  compiler::Node* OperandOffset(int operand_index);
+
+  // Returns a value built from a sequence of bytes in the bytecode
+  // array starting at |relative_offset| from the current bytecode.
+  // The |result_type| determines the size and signedness of the
+  // value read. This method should only be used on architectures that
+  // do not support unaligned memory accesses.
+  compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
+                                               MachineType result_type);
+
+  compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
+  compiler::Node* BytecodeOperandSignedByte(int operand_index);
+  compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
+  compiler::Node* BytecodeOperandSignedShort(int operand_index);
+  compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
+  compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+
+  compiler::Node* BytecodeSignedOperand(int operand_index,
+                                        OperandSize operand_size);
+  compiler::Node* BytecodeUnsignedOperand(int operand_index,
+                                          OperandSize operand_size);
 
   // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
   // update BytecodeOffset() itself.
@@ -187,8 +218,12 @@
   void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                            BailoutReason bailout_reason);
 
+  OperandScale operand_scale() const { return operand_scale_; }
+
   Bytecode bytecode_;
+  OperandScale operand_scale_;
   CodeStubAssembler::Variable accumulator_;
+  AccumulatorUse accumulator_use_;
   CodeStubAssembler::Variable context_;
   CodeStubAssembler::Variable bytecode_array_;
 
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
new file mode 100644
index 0000000..6d9917d
--- /dev/null
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+#define __ assembler_->
+
+IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
+    : assembler_(assembler) {}
+
+bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
+  switch (function_id) {
+#define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
+    INTRINSICS_LIST(SUPPORTED)
+    return true;
+#undef SUPPORTED
+    default:
+      return false;
+  }
+}
+
+Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
+                                        Node* first_arg_reg, Node* arg_count) {
+  InterpreterAssembler::Label abort(assembler_), end(assembler_);
+  InterpreterAssembler::Variable result(assembler_,
+                                        MachineRepresentation::kTagged);
+
+#define MAKE_LABEL(name, lower_case, count) \
+  InterpreterAssembler::Label lower_case(assembler_);
+  INTRINSICS_LIST(MAKE_LABEL)
+#undef MAKE_LABEL
+
+#define LABEL_POINTER(name, lower_case, count) &lower_case,
+  InterpreterAssembler::Label* labels[] = {INTRINSICS_LIST(LABEL_POINTER)};
+#undef LABEL_POINTER
+
+#define CASE(name, lower_case, count) \
+  static_cast<int32_t>(Runtime::kInline##name),
+  int32_t cases[] = {INTRINSICS_LIST(CASE)};
+#undef CASE
+
+  __ Switch(function_id, &abort, cases, labels, arraysize(cases));
+#define HANDLE_CASE(name, lower_case, expected_arg_count)   \
+  __ Bind(&lower_case);                                     \
+  if (FLAG_debug_code) {                                    \
+    AbortIfArgCountMismatch(expected_arg_count, arg_count); \
+  }                                                         \
+  result.Bind(name(first_arg_reg));                         \
+  __ Goto(&end);
+  INTRINSICS_LIST(HANDLE_CASE)
+#undef HANDLE_CASE
+
+  __ Bind(&abort);
+  __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+  result.Bind(__ UndefinedConstant());
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return result.value();
+}
+
+Node* IntrinsicsHelper::CompareInstanceType(Node* map, int type,
+                                            InstanceTypeCompareMode mode) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  Node* instance_type = __ LoadInstanceType(map);
+
+  InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
+      end(assembler_);
+  Node* condition;
+  if (mode == kInstanceTypeEqual) {
+    condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+  } else {
+    DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
+    condition =
+        __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+  }
+  __ Branch(condition, &if_true, &if_false);
+
+  __ Bind(&if_true);
+  return_value.Bind(__ BooleanConstant(true));
+  __ Goto(&end);
+
+  __ Bind(&if_false);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+
+  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+      end(assembler_);
+  Node* arg = __ LoadRegister(input);
+
+  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+  __ Bind(&if_smi);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+                                        kInstanceTypeGreaterThanOrEqual));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+Node* IntrinsicsHelper::IsArray(Node* input) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+
+  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+      end(assembler_);
+  Node* arg = __ LoadRegister(input);
+
+  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+  __ Bind(&if_smi);
+  return_value.Bind(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&if_not_smi);
+  return_value.Bind(
+      CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
+  __ Goto(&end);
+
+  __ Bind(&end);
+  return return_value.value();
+}
+
+void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
+  InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
+      end(assembler_);
+  Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
+  __ Branch(comparison, &match, &mismatch);
+  __ Bind(&mismatch);
+  __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+  __ Goto(&end);
+  __ Bind(&match);
+  __ Goto(&end);
+  __ Bind(&end);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
new file mode 100644
index 0000000..e27c678
--- /dev/null
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+#define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class Node;
+}  // namespace compiler
+
+#define INTRINSICS_LIST(V)           \
+  V(IsJSReceiver, is_js_receiver, 1) \
+  V(IsArray, is_array, 1)
+
+namespace interpreter {
+
+class IntrinsicsHelper {
+ public:
+  explicit IntrinsicsHelper(InterpreterAssembler* assembler);
+
+  compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
+                                  compiler::Node* context,
+                                  compiler::Node* first_arg_reg,
+                                  compiler::Node* arg_count);
+
+  static bool IsSupported(Runtime::FunctionId function_id);
+
+ private:
+  enum InstanceTypeCompareMode {
+    kInstanceTypeEqual,
+    kInstanceTypeGreaterThanOrEqual
+  };
+  compiler::Node* CompareInstanceType(compiler::Node* map, int type,
+                                      InstanceTypeCompareMode mode);
+  void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
+  InterpreterAssembler* assembler_;
+
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+  compiler::Node* name(compiler::Node* input);
+  INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
+#undef DECLARE_INTRINSIC_HELPER
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index eb88342..5084300 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -11,6 +11,8 @@
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/interpreter-assembler.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/log.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -22,30 +24,69 @@
 #define __ assembler->
 
 Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
-  memset(&dispatch_table_, 0, sizeof(dispatch_table_));
+  memset(dispatch_table_, 0, sizeof(dispatch_table_));
 }
 
 void Interpreter::Initialize() {
   DCHECK(FLAG_ignition);
   if (IsDispatchTableInitialized()) return;
-  Zone zone;
+  Zone zone(isolate_->allocator());
   HandleScope scope(isolate_);
 
-#define GENERATE_CODE(Name, ...)                                        \
-  {                                                                     \
-    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
-    Do##Name(&assembler);                                               \
-    Handle<Code> code = assembler.GenerateCode();                       \
-    TraceCodegen(code, #Name);                                          \
-    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;      \
+  // Generate bytecode handlers for all bytecodes and scales.
+  for (OperandScale operand_scale = OperandScale::kSingle;
+       operand_scale <= OperandScale::kMaxValid;
+       operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+#define GENERATE_CODE(Name, ...)                                               \
+  {                                                                            \
+    if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
+      InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name,       \
+                                     operand_scale);                           \
+      Do##Name(&assembler);                                                    \
+      Handle<Code> code = assembler.GenerateCode();                            \
+      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
+      dispatch_table_[index] = *code;                                          \
+      TraceCodegen(code);                                                      \
+      LOG_CODE_EVENT(                                                          \
+          isolate_,                                                            \
+          CodeCreateEvent(                                                     \
+              Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code),         \
+              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
+    }                                                                          \
   }
-  BYTECODE_LIST(GENERATE_CODE)
+    BYTECODE_LIST(GENERATE_CODE)
 #undef GENERATE_CODE
+  }
+
+  // Fill unused entries with the illegal bytecode handler.
+  size_t illegal_index =
+      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
+  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
+    if (dispatch_table_[index] == nullptr) {
+      dispatch_table_[index] = dispatch_table_[illegal_index];
+    }
+  }
 }
 
-Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
+                                      OperandScale operand_scale) {
   DCHECK(IsDispatchTableInitialized());
-  return dispatch_table_[Bytecodes::ToByte(bytecode)];
+  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
+  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+  return dispatch_table_[index];
+}
+
+// static
+size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
+                                          OperandScale operand_scale) {
+  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
+  size_t index = static_cast<size_t>(bytecode);
+  OperandScale current_scale = OperandScale::kSingle;
+  while (current_scale != operand_scale) {
+    index += kEntriesPerOperandScale;
+    current_scale = Bytecodes::NextOperandScale(current_scale);
+  }
+  return index;
 }
 
 void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
@@ -62,6 +103,9 @@
 }
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
+  TRACE_EVENT0("v8", "V8.CompileIgnition");
+
   if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
     OFStream os(stdout);
     base::SmartArrayPointer<char> name = info->GetDebugName();
@@ -88,8 +132,10 @@
 #endif  // DEBUG
 
   BytecodeGenerator generator(info->isolate(), info->zone());
-  info->EnsureFeedbackVector();
   Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+
+  if (generator.HasStackOverflow()) return false;
+
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
     bytecodes->Print(os);
@@ -102,23 +148,36 @@
 }
 
 bool Interpreter::IsDispatchTableInitialized() {
-  if (FLAG_trace_ignition) {
-    // Regenerate table to add bytecode tracing operations.
+  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
+    // Regenerate table to add bytecode tracing operations
+    // or to print the assembly code generated by TurboFan.
     return false;
   }
   return dispatch_table_[0] != nullptr;
 }
 
-void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+void Interpreter::TraceCodegen(Handle<Code> code) {
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_trace_ignition_codegen) {
     OFStream os(stdout);
-    code->Disassemble(name, os);
+    code->Disassemble(nullptr, os);
     os << std::flush;
   }
 #endif  // ENABLE_DISASSEMBLER
 }
 
+const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
+#ifdef ENABLE_DISASSEMBLER
+#define RETURN_NAME(Name, ...)                                         \
+  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
+    return #Name;                                                      \
+  }
+  BYTECODE_LIST(RETURN_NAME)
+#undef RETURN_NAME
+#endif  // ENABLE_DISASSEMBLER
+  return nullptr;
+}
+
 // LdaZero
 //
 // Load literal '0' into the accumulator.
@@ -128,11 +187,10 @@
   __ Dispatch();
 }
 
-
-// LdaSmi8 <imm8>
+// LdaSmi <imm>
 //
-// Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
+// Load an integer literal into the accumulator as a Smi.
+void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
   Node* raw_int = __ BytecodeOperandImm(0);
   Node* smi_int = __ SmiTag(raw_int);
   __ SetAccumulator(smi_int);
@@ -154,15 +212,6 @@
   DoLoadConstant(assembler);
 }
 
-
-// LdaConstantWide <idx>
-//
-// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
-  DoLoadConstant(assembler);
-}
-
-
 // LdaUndefined
 //
 // Load Undefined into the accumulator.
@@ -248,13 +297,6 @@
 }
 
 
-// MovWide <src> <dst>
-//
-// Stores the value of register <src> to register <dst>.
-void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
-  DoMov(assembler);
-}
-
 void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
@@ -295,27 +337,6 @@
   DoLoadGlobal(ic, assembler);
 }
 
-// LdaGlobalWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-// LdaGlobalInsideTypeofWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> inside of a typeof.
-void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
 void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
@@ -333,7 +354,6 @@
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   __ CallStub(ic.descriptor(), code_target, context, global, name, value,
               smi_slot, type_feedback_vector);
-
   __ Dispatch();
 }
 
@@ -359,29 +379,6 @@
   DoStoreGlobal(ic, assembler);
 }
 
-
-// StaGlobalSloppyWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
-}
-
-
-// StaGlobalStrictWide <name_index> <slot>
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
-}
-
-
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
@@ -394,15 +391,6 @@
   __ Dispatch();
 }
 
-
-// LdaContextSlotWide <context> <slot_index>
-//
-// Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
-  DoLdaContextSlot(assembler);
-}
-
-
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
@@ -415,14 +403,6 @@
   __ Dispatch();
 }
 
-
-// StaContextSlot <context> <slot_index>
-//
-// Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
-  DoStaContextSlot(assembler);
-}
-
 void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
                                    InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
@@ -433,7 +413,6 @@
   __ Dispatch();
 }
 
-
 // LdaLookupSlot <name_index>
 //
 // Lookup the object with the name in constant pool entry |name_index|
@@ -442,7 +421,6 @@
   DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
-
 // LdaLookupSlotInsideTypeof <name_index>
 //
 // Lookup the object with the name in constant pool entry |name_index|
@@ -451,25 +429,6 @@
   DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
-
-// LdaLookupSlotWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically.
-void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
-  DoLdaLookupSlot(assembler);
-}
-
-
-// LdaLookupSlotInsideTypeofWide <name_index>
-//
-// Lookup the object with the name in constant pool entry |name_index|
-// dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeofWide(
-    InterpreterAssembler* assembler) {
-  DoLdaLookupSlotInsideTypeof(assembler);
-}
-
 void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
                                     InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
@@ -484,7 +443,6 @@
   __ Dispatch();
 }
 
-
 // StaLookupSlotSloppy <name_index>
 //
 // Store the object in accumulator to the object with the name in constant
@@ -502,24 +460,6 @@
   DoStoreLookupSlot(LanguageMode::STRICT, assembler);
 }
 
-
-// StaLookupSlotSloppyWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
-  DoStaLookupSlotSloppy(assembler);
-}
-
-
-// StaLookupSlotStrictWide <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
-  DoStaLookupSlotStrict(assembler);
-}
-
 void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
@@ -546,17 +486,6 @@
   DoLoadIC(ic, assembler);
 }
 
-// LoadICWide <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
 void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
@@ -582,17 +511,6 @@
   DoKeyedLoadIC(ic, assembler);
 }
 
-// KeyedLoadICWide <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -633,30 +551,6 @@
   DoStoreIC(ic, assembler);
 }
 
-
-// StoreICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreIC(ic, assembler);
-}
-
-
-// StoreICStrictWide <object> <name_index> <slot>
-//
-// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
-// the name in constant pool entry <name_index> with the value in the
-// accumulator.
-void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreIC(ic, assembler);
-}
-
 void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
@@ -695,28 +589,6 @@
   DoKeyedStoreIC(ic, assembler);
 }
 
-
-// KeyedStoreICSloppyWide <object> <key> <slot>
-//
-// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoKeyedStoreIC(ic, assembler);
-}
-
-
-// KeyedStoreICStoreWide <object> <key> <slot>
-//
-// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
-// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedStoreIC(ic, assembler);
-}
-
 // PushContext <context>
 //
 // Saves the current context in <context>, and pushes the accumulator as the
@@ -741,6 +613,20 @@
   __ Dispatch();
 }
 
+void Interpreter::DoBinaryOp(Callable callable,
+                             InterpreterAssembler* assembler) {
+  // TODO(bmeurer): Collect definition side type feedback for various
+  // binary operations.
+  Node* target = __ HeapConstant(callable.code());
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* lhs = __ LoadRegister(reg_index);
+  Node* rhs = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
 void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
                              InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
@@ -759,7 +645,7 @@
 //
 // Add register <src> to accumulator.
 void Interpreter::DoAdd(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kAdd, assembler);
+  DoBinaryOp(CodeFactory::Add(isolate_), assembler);
 }
 
 
@@ -767,7 +653,7 @@
 //
 // Subtract register <src> from accumulator.
 void Interpreter::DoSub(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kSubtract, assembler);
+  DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
 }
 
 
@@ -799,7 +685,7 @@
 //
 // BitwiseOr register <src> to accumulator.
 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseOr, assembler);
+  DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
 }
 
 
@@ -807,7 +693,7 @@
 //
 // BitwiseXor register <src> to accumulator.
 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseXor, assembler);
+  DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
 }
 
 
@@ -815,7 +701,7 @@
 //
 // BitwiseAnd register <src> to accumulator.
 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+  DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
 }
 
 
@@ -883,24 +769,40 @@
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
-  Node* result =
-      __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  Node* to_boolean_value =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Node* true_value = __ BooleanConstant(true);
+  Node* false_value = __ BooleanConstant(false);
+  Node* condition = __ WordEqual(to_boolean_value, true_value);
+  __ Branch(condition, &if_true, &if_false);
+  __ Bind(&if_true);
+  {
+    __ SetAccumulator(false_value);
+    __ Dispatch();
+  }
+  __ Bind(&if_false);
+  {
+    __ SetAccumulator(true_value);
+    __ Dispatch();
+  }
 }
 
-
 // TypeOf
 //
 // Load the accumulator with the string representating type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::Typeof(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* result =
-      __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -960,15 +862,6 @@
   DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
-
-// CallWide <callable> <receiver> <arg_count>
-//
-// Call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
-  DoJSCall(assembler, TailCallMode::kDisallow);
-}
-
 // TailCall <callable> <receiver> <arg_count>
 //
 // Tail call a JSfunction or Callable in |callable| with the |receiver| and
@@ -977,16 +870,8 @@
   DoJSCall(assembler, TailCallMode::kAllow);
 }
 
-// TailCallWide <callable> <receiver> <arg_count>
-//
-// Tail call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
-  DoJSCall(assembler, TailCallMode::kAllow);
-}
-
 void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
-  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
@@ -1006,19 +891,26 @@
   DoCallRuntimeCommon(assembler);
 }
 
-
-// CallRuntime <function_id> <first_arg> <arg_count>
+// InvokeIntrinsic <function_id> <first_arg> <arg_count>
 //
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
-  DoCallRuntimeCommon(assembler);
+// Implements the semantic equivalent of calling the runtime function
+// |function_id| with the first argument in |first_arg| and |arg_count|
+// arguments in subsequent registers.
+void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* arg_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  IntrinsicsHelper helper(assembler);
+  Node* result =
+      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
   // Call the runtime function.
-  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* function_id = __ BytecodeOperandRuntimeId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
@@ -1047,17 +939,6 @@
   DoCallRuntimeForPairCommon(assembler);
 }
 
-
-// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
-//
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
-  DoCallRuntimeForPairCommon(assembler);
-}
-
 void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
@@ -1088,15 +969,6 @@
   DoCallJSRuntimeCommon(assembler);
 }
 
-
-// CallJSRuntimeWide <context_index> <receiver> <arg_count>
-//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
-  DoCallJSRuntimeCommon(assembler);
-}
-
 void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
   Node* new_target = __ GetAccumulator();
@@ -1123,23 +995,11 @@
   DoCallConstruct(assembler);
 }
 
-
-// NewWide <constructor> <first_arg> <arg_count>
-//
-// Call operator new with |constructor| and the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers. The new.target is in the accumulator.
-//
-void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
-  DoCallConstruct(assembler);
-}
-
-
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kEqual, assembler);
+  DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
 }
 
 
@@ -1147,7 +1007,7 @@
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kNotEqual, assembler);
+  DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
 }
 
 
@@ -1155,16 +1015,7 @@
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kStrictEqual, assembler);
-}
-
-
-// TestNotEqualStrict <src>
-//
-// Test if the value in the <src> register is not strictly equal to the
-// accumulator.
-void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kStrictNotEqual, assembler);
+  DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
 }
 
 
@@ -1172,7 +1023,7 @@
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kLessThan, assembler);
+  DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
 }
 
 
@@ -1180,7 +1031,7 @@
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kGreaterThan, assembler);
+  DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
 }
 
 
@@ -1189,7 +1040,7 @@
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
+  DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
 }
 
 
@@ -1198,7 +1049,7 @@
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
+  DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
 }
 
 
@@ -1219,16 +1070,22 @@
   DoBinaryOp(Runtime::kInstanceOf, assembler);
 }
 
+void Interpreter::DoTypeConversionOp(Callable callable,
+                                     InterpreterAssembler* assembler) {
+  Node* target = __ HeapConstant(callable.code());
+  Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
 
 // ToName
 //
 // Cast the object referenced by the accumulator to a name.
 void Interpreter::DoToName(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
 }
 
 
@@ -1236,11 +1093,7 @@
 //
 // Cast the object referenced by the accumulator to a number.
 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
 }
 
 
@@ -1248,26 +1101,20 @@
 //
 // Cast the object referenced by the accumulator to a JSObject.
 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
 }
 
-
-// Jump <imm8>
+// Jump <imm>
 //
-// Jump by number of bytes represented by the immediate operand |imm8|.
+// Jump by number of bytes represented by the immediate operand |imm|.
 void Interpreter::DoJump(InterpreterAssembler* assembler) {
   Node* relative_jump = __ BytecodeOperandImm(0);
   __ Jump(relative_jump);
 }
 
-
-// JumpConstant <idx8>
+// JumpConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1275,17 +1122,7 @@
   __ Jump(relative_jump);
 }
 
-
-// JumpConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the
-// constant pool.
-void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
-  DoJumpConstant(assembler);
-}
-
-
-// JumpIfTrue <imm8>
+// JumpIfTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
@@ -1296,10 +1133,9 @@
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
-
-// JumpIfTrueConstant <idx8>
+// JumpIfTrueConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains true.
 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1310,17 +1146,7 @@
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
-
-// JumpIfTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfTrueConstant(assembler);
-}
-
-
-// JumpIfFalse <imm8>
+// JumpIfFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
@@ -1331,10 +1157,9 @@
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
-
-// JumpIfFalseConstant <idx8>
+// JumpIfFalseConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains false.
 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1345,42 +1170,35 @@
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
-
-// JumpIfFalseConstant <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfFalseConstant(assembler);
-}
-
-
-// JumpIfToBooleanTrue <imm8>
+// JumpIfToBooleanTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
 }
 
-
-// JumpIfToBooleanTrueConstant <idx8>
+// JumpIfToBooleanTrueConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
     InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1388,44 +1206,35 @@
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
 }
 
-
-// JumpIfToBooleanTrueConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is true when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanTrueConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfToBooleanTrueConstant(assembler);
-}
-
-
-// JumpIfToBooleanFalse <imm8>
+// JumpIfToBooleanFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
 }
 
-
-// JumpIfToBooleanFalseConstant <idx8>
+// JumpIfToBooleanFalseConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
     InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::ToBoolean(isolate_);
+  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
+      __ CallStub(callable.descriptor(), target, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1433,19 +1242,7 @@
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
 }
 
-
-// JumpIfToBooleanFalseConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is false when the object is cast
-// to boolean.
-void Interpreter::DoJumpIfToBooleanFalseConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfToBooleanFalseConstant(assembler);
-}
-
-
-// JumpIfNull <imm8>
+// JumpIfNull <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
@@ -1456,10 +1253,9 @@
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }
 
-
-// JumpIfNullConstant <idx8>
+// JumpIfNullConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
 void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1470,16 +1266,7 @@
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }
 
-
-// JumpIfNullConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfNullConstant(assembler);
-}
-
-// JumpIfUndefined <imm8>
+// JumpIfUndefined <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
@@ -1491,10 +1278,9 @@
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
-
-// JumpIfUndefinedConstant <idx8>
+// JumpIfUndefinedConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1506,17 +1292,7 @@
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
-
-// JumpIfUndefinedConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstantWide(
-    InterpreterAssembler* assembler) {
-  DoJumpIfUndefinedConstant(assembler);
-}
-
-// JumpIfNotHole <imm8>
+// JumpIfNotHole <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the hole.
@@ -1527,9 +1303,9 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
-// JumpIfNotHoleConstant <idx8>
+// JumpIfNotHoleConstant <idx>
 //
-// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the hole constant.
 void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
@@ -1540,21 +1316,13 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
-// JumpIfNotHoleConstantWide <idx16>
-//
-// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
-// if the object referenced by the accumulator is the hole constant.
-void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
-  DoJumpIfNotHoleConstant(assembler);
-}
-
 void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
                                   InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
   Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandImm(2);
+  Node* flags_raw = __ BytecodeOperandFlag(2);
   Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
@@ -1570,19 +1338,22 @@
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* pattern = __ LoadConstantPoolEntry(index);
+  Node* literal_index_raw = __ BytecodeOperandIdx(1);
+  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* flags_raw = __ BytecodeOperandFlag(2);
+  Node* flags = __ SmiTag(flags_raw);
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
+                             literal_index, pattern, flags);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-
-// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
-//
-// Creates a regular expression literal for literal index <literal_idx> with
-// <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
-}
-
-
 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
@@ -1591,16 +1362,6 @@
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
-
-// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an array literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
-}
-
-
 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
@@ -1609,16 +1370,6 @@
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
-
-// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
-//
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
-}
-
-
 // CreateClosure <index> <tenured>
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
@@ -1628,7 +1379,7 @@
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
-  Node* tenured_raw = __ BytecodeOperandImm(1);
+  Node* tenured_raw = __ BytecodeOperandFlag(1);
   Node* tenured = __ SmiTag(tenured_raw);
   Node* context = __ GetContext();
   Node* result =
@@ -1637,16 +1388,6 @@
   __ Dispatch();
 }
 
-
-// CreateClosureWide <index> <tenured>
-//
-// Creates a new closure for SharedFunctionInfo at position |index| in the
-// constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
-  return DoCreateClosure(assembler);
-}
-
-
 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
@@ -1737,11 +1478,13 @@
 // DebugBreak
 //
 // Call runtime to handle a debug break.
-#define DEBUG_BREAK(Name, ...)                                              \
-  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
-    Node* context = __ GetContext();                                        \
-    Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
-    __ DispatchToBytecodeHandler(original_handler);                         \
+#define DEBUG_BREAK(Name, ...)                                                \
+  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
+    Node* context = __ GetContext();                                          \
+    Node* accumulator = __ GetAccumulator();                                  \
+    Node* original_handler =                                                  \
+        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+    __ DispatchToBytecodeHandler(original_handler);                           \
   }
 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
 #undef DEBUG_BREAK
@@ -1768,18 +1511,6 @@
   __ Dispatch();
 }
 
-
-// ForInPrepareWide <cache_info_triple>
-//
-// Returns state for for..in loop execution based on the object in the
-// accumulator. The result is output in registers |cache_info_triple| to
-// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
-// and cache_length respectively.
-void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
-  DoForInPrepare(assembler);
-}
-
-
 // ForInNext <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the the accumulator.
@@ -1792,53 +1523,101 @@
   Node* cache_type = __ LoadRegister(cache_type_reg);
   Node* cache_array_reg = __ NextRegister(cache_type_reg);
   Node* cache_array = __ LoadRegister(cache_array_reg);
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
-                                cache_array, cache_type, index);
-  __ SetAccumulator(result);
-  __ Dispatch();
+
+  // Load the next key from the enumeration array.
+  Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+
+  // Check if we can use the for-in fast path potentially using the enum cache.
+  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+  Node* condition = __ WordEqual(receiver_map, cache_type);
+  __ Branch(condition, &if_fast, &if_slow);
+  __ Bind(&if_fast);
+  {
+    // Enum cache in use for {receiver}, the {key} is definitely valid.
+    __ SetAccumulator(key);
+    __ Dispatch();
+  }
+  __ Bind(&if_slow);
+  {
+    // Record the fact that we hit the for-in slow path.
+    Node* vector_index = __ BytecodeOperandIdx(3);
+    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+    Node* megamorphic_sentinel =
+        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
+    __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
+                                            megamorphic_sentinel);
+
+    // Need to filter the {key} for the {receiver}.
+    Node* context = __ GetContext();
+    Node* result =
+        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
 
-
-// ForInNextWide <receiver> <index> <cache_info_pair>
-//
-// Returns the next enumerable property in the the accumulator.
-void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
-  return DoForInNext(assembler);
-}
-
-
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
 void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
-  // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
 
+  // Check if {index} is at {cache_length} already.
+  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Node* condition = __ WordEqual(index, cache_length);
+  __ Branch(condition, &if_true, &if_false);
+  __ Bind(&if_true);
+  {
+    Node* result = __ BooleanConstant(true);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+  __ Bind(&if_false);
+  {
+    Node* result = __ BooleanConstant(false);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+}
 
 // ForInStep <index>
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
 void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
-  // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
+  Node* one = __ SmiConstant(Smi::FromInt(1));
+  Node* result = __ SmiAdd(index, one);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// Wide
+//
+// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
+void Interpreter::DoWide(InterpreterAssembler* assembler) {
+  __ DispatchWide(OperandScale::kDouble);
+}
+
+// ExtraWide
+//
+// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
+void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
+  __ DispatchWide(OperandScale::kQuadruple);
+}
+
+// Illegal
+//
+// An invalid bytecode aborting execution if dispatched.
+void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
+  __ Abort(kInvalidBytecode);
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index e02e914..ea50faa 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -40,12 +40,14 @@
   static bool MakeBytecode(CompilationInfo* info);
 
   // Return bytecode handler for |bytecode|.
-  Code* GetBytecodeHandler(Bytecode bytecode);
+  Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
 
   // GC support.
   void IterateDispatchTable(ObjectVisitor* v);
 
-  void TraceCodegen(Handle<Code> code, const char* name);
+  // Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
+  void TraceCodegen(Handle<Code> code);
+  const char* LookupNameOfBytecodeHandler(Code* code);
 
   Address dispatch_table_address() {
     return reinterpret_cast<Address>(&dispatch_table_[0]);
@@ -58,6 +60,9 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
+  // Generates code to perform the binary operations via |callable|.
+  void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code to perform the binary operations via |function_id|.
   void DoBinaryOp(Runtime::FunctionId function_id,
                   InterpreterAssembler* assembler);
@@ -103,9 +108,12 @@
   // Generates code to perform a JS runtime call.
   void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
 
-  // Generates code to perform a constructor call..
+  // Generates code to perform a constructor call.
   void DoCallConstruct(InterpreterAssembler* assembler);
 
+  // Generates code to perform a type conversion.
+  void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code ro create a literal via |function_id|.
   void DoCreateLiteral(Runtime::FunctionId function_id,
                        InterpreterAssembler* assembler);
@@ -122,9 +130,14 @@
   void DoStoreLookupSlot(LanguageMode language_mode,
                          InterpreterAssembler* assembler);
 
+  // Get dispatch table index of bytecode.
+  static size_t GetDispatchTableIndex(Bytecode bytecode,
+                                      OperandScale operand_scale);
+
   bool IsDispatchTableInitialized();
 
-  static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
+  static const int kNumberOfWideVariants = 3;
+  static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
 
   Isolate* isolate_;
   Code* dispatch_table_[kDispatchTableSize];
diff --git a/src/interpreter/register-translator.cc b/src/interpreter/register-translator.cc
deleted file mode 100644
index 3eba42f..0000000
--- a/src/interpreter/register-translator.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/register-translator.h"
-
-#include "src/interpreter/bytecode-array-builder.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-RegisterTranslator::RegisterTranslator(RegisterMover* mover)
-    : mover_(mover),
-      emitting_moves_(false),
-      window_registers_count_(0),
-      output_moves_count_(0) {}
-
-void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
-                                                 uint32_t* raw_operands,
-                                                 int raw_operand_count) {
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
-  if (!emitting_moves_) {
-    emitting_moves_ = true;
-    DCHECK_EQ(window_registers_count_, 0);
-    int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
-    for (int i = 0; i < raw_operand_count; i++) {
-      if ((register_bitmap & (1 << i)) == 0) {
-        continue;
-      }
-      Register in_reg = Register::FromRawOperand(raw_operands[i]);
-      Register out_reg = TranslateAndMove(bytecode, i, in_reg);
-      raw_operands[i] = out_reg.ToRawOperand();
-    }
-    window_registers_count_ = 0;
-    emitting_moves_ = false;
-  } else {
-    // When the register translator is translating registers, it will
-    // cause the bytecode generator to emit moves on it's behalf. This
-    // path is reached by these moves.
-    DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
-           Register::FromRawOperand(raw_operands[0]).is_valid() &&
-           Register::FromRawOperand(raw_operands[1]).is_valid());
-  }
-}
-
-Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
-                                              int operand_index, Register reg) {
-  if (FitsInReg8Operand(reg)) {
-    return reg;
-  }
-
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
-  if (operand_size == OperandSize::kShort) {
-    CHECK(FitsInReg16Operand(reg));
-    return Translate(reg);
-  }
-
-  CHECK((operand_type == OperandType::kReg8 ||
-         operand_type == OperandType::kRegOut8) &&
-        RegisterIsMovableToWindow(bytecode, operand_index));
-  Register translated_reg = Translate(reg);
-  Register window_reg(kTranslationWindowStart + window_registers_count_);
-  window_registers_count_ += 1;
-  if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
-    DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
-    mover()->MoveRegisterUntranslated(translated_reg, window_reg);
-  } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
-    DCHECK_LT(output_moves_count_, kTranslationWindowLength);
-    output_moves_[output_moves_count_] =
-        std::make_pair(window_reg, translated_reg);
-    output_moves_count_ += 1;
-  } else {
-    UNREACHABLE();
-  }
-  return window_reg;
-}
-
-// static
-bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
-                                                   int operand_index) {
-  // By design, we only support moving individual registers. There
-  // should be wide variants of such bytecodes instead to avoid the
-  // need for a large translation window.
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  if (operand_type != OperandType::kReg8 &&
-      operand_type != OperandType::kRegOut8) {
-    return false;
-  } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
-    return true;
-  } else {
-    OperandType next_operand_type =
-        Bytecodes::GetOperandType(bytecode, operand_index + 1);
-    return (next_operand_type != OperandType::kRegCount8 &&
-            next_operand_type != OperandType::kRegCount16);
-  }
-}
-
-void RegisterTranslator::TranslateOutputRegisters() {
-  if (!emitting_moves_) {
-    emitting_moves_ = true;
-    while (output_moves_count_ > 0) {
-      output_moves_count_ -= 1;
-      mover()->MoveRegisterUntranslated(
-          output_moves_[output_moves_count_].first,
-          output_moves_[output_moves_count_].second);
-    }
-    emitting_moves_ = false;
-  }
-}
-
-// static
-Register RegisterTranslator::Translate(Register reg) {
-  if (reg.index() >= kTranslationWindowStart) {
-    return Register(reg.index() + kTranslationWindowLength);
-  } else {
-    return reg;
-  }
-}
-
-// static
-bool RegisterTranslator::InTranslationWindow(Register reg) {
-  return (reg.index() >= kTranslationWindowStart &&
-          reg.index() <= kTranslationWindowLimit);
-}
-
-// static
-Register RegisterTranslator::UntranslateRegister(Register reg) {
-  if (reg.index() >= kTranslationWindowStart) {
-    return Register(reg.index() - kTranslationWindowLength);
-  } else {
-    return reg;
-  }
-}
-
-// static
-int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
-  return kTranslationWindowStart - reg.index();
-}
-
-// static
-bool RegisterTranslator::FitsInReg8Operand(Register reg) {
-  return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
-}
-
-// static
-bool RegisterTranslator::FitsInReg16Operand(Register reg) {
-  int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
-  return reg.is_short_operand() && reg.index() < max_index;
-}
-
-// static
-int RegisterTranslator::RegisterCountAdjustment(int register_count,
-                                                int parameter_count) {
-  if (register_count > kTranslationWindowStart) {
-    return kTranslationWindowLength;
-  } else if (parameter_count > 0) {
-    Register param0 = Register::FromParameterIndex(0, parameter_count);
-    if (!param0.is_byte_operand()) {
-      // TODO(oth): Number of parameters means translation is
-      // required, but the translation window location is such that
-      // some space is wasted. Hopefully a rare corner case, but could
-      // relocate window to limit waste.
-      return kTranslationWindowLimit + 1 - register_count;
-    }
-  }
-  return 0;
-}
-
-}  // namespace interpreter
-}  // namespace internal
-}  // namespace v8
diff --git a/src/interpreter/register-translator.h b/src/interpreter/register-translator.h
deleted file mode 100644
index b683a89..0000000
--- a/src/interpreter/register-translator.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
-
-#include "src/interpreter/bytecodes.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-class RegisterMover;
-
-// A class that enables bytecodes having only byte sized register operands
-// to access all registers in the two byte space. Most bytecode uses few
-// registers so space can be saved if most bytecodes with register operands
-// just take byte operands.
-//
-// To reach the wider register space, a translation window is reserved in
-// the byte addressable space specifically for copying registers into and
-// out of before a bytecode is emitted. The translation window occupies
-// the last register slots at the top of the byte addressable range.
-//
-// Because of the translation window any registers which naturally lie
-// at above the translation window have to have their register index
-// incremented by the window width before they are emitted.
-//
-// This class does not support moving ranges of registers to and from
-// the translation window. It would be straightforward to add support
-// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
-// these would have two negative effects. The translation window would
-// need to be wider, further limiting the space for byte operands. And
-// every register in a range would need to be moved consuming more
-// space in the bytecode array.
-class RegisterTranslator final {
- public:
-  explicit RegisterTranslator(RegisterMover* mover);
-
-  // Translate and re-write the register operands that are inputs
-  // to |bytecode| when it is about to be emitted.
-  void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
-                               int raw_operand_count);
-
-  // Translate and re-write the register operands that are outputs
-  // from |bytecode| when it has just been output.
-  void TranslateOutputRegisters();
-
-  // Returns true if |reg| is in the translation window.
-  static bool InTranslationWindow(Register reg);
-
-  // Return register value as if it had been translated.
-  static Register UntranslateRegister(Register reg);
-
-  // Returns the distance in registers between the translation window
-  // start and |reg|. The result is negative when |reg| is above the
-  // start of the translation window.
-  static int DistanceToTranslationWindow(Register reg);
-
-  // Returns true if |reg| can be represented as an 8-bit operand
-  // after translation.
-  static bool FitsInReg8Operand(Register reg);
-
-  // Returns true if |reg| can be represented as an 16-bit operand
-  // after translation.
-  static bool FitsInReg16Operand(Register reg);
-
-  // Returns the increment to the register count necessary if the
-  // value indicates the translation window is required.
-  static int RegisterCountAdjustment(int register_count, int parameter_count);
-
- private:
-  static const int kTranslationWindowLength = 4;
-  static const int kTranslationWindowLimit = -kMinInt8;
-  static const int kTranslationWindowStart =
-      kTranslationWindowLimit - kTranslationWindowLength + 1;
-
-  Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
-  static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
-
-  static Register Translate(Register reg);
-
-  RegisterMover* mover() const { return mover_; }
-
-  // Entity to perform register moves necessary to translate registers
-  // and ensure reachability.
-  RegisterMover* mover_;
-
-  // Flag to avoid re-entrancy when emitting move bytecodes for
-  // translation.
-  bool emitting_moves_;
-
-  // Number of window registers in use.
-  int window_registers_count_;
-
-  // State for restoring register moves emitted by TranslateOutputRegisters.
-  std::pair<Register, Register> output_moves_[kTranslationWindowLength];
-  int output_moves_count_;
-};
-
-// Interface for RegisterTranslator helper class that will emit
-// register move bytecodes at the translator's behest.
-class RegisterMover {
- public:
-  virtual ~RegisterMover() {}
-
-  // Move register |from| to register |to| with no translation.
-  // returns false if either register operand is invalid. Implementations
-  // of this method must be aware that register moves with bad
-  // register values are a security hole.
-  virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
-};
-
-}  // namespace interpreter
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 0b7c44e..99a865b 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -4,7 +4,6 @@
 
 #include "src/interpreter/source-position-table.h"
 
-#include "src/assembler.h"
 #include "src/objects-inl.h"
 #include "src/objects.h"
 
@@ -12,71 +11,196 @@
 namespace internal {
 namespace interpreter {
 
-class IsStatementField : public BitField<bool, 0, 1> {};
-class SourcePositionField : public BitField<int, 1, 30> {};
+// We'll use a simple encoding scheme to record the source positions.
+// Conceptually, each position consists of:
+// - bytecode_offset: An integer index into the BytecodeArray
+// - source_position: An integer index into the source string.
+// - position type: Each position is either a statement or an expression.
+//
+// The basic idea for the encoding is to use a variable-length integer coding,
+// where each byte contains 7 bits of payload data, and 1 'more' bit that
+// determines whether additional bytes follow. Additionally:
+// - we record the difference from the previous position,
+// - we just stuff one bit for the type into the bytecode offset,
+// - we write least-significant bits first,
+// - negative numbers occur only rarely, so we use a denormalized
+//   most-significant byte (a byte with all zeros, which normally wouldn't
+//   make any sense) to encode a negative sign, so that we 'pay' nothing for
+//   positive numbers, but have to pay a full byte for negative integers.
+
+namespace {
+
+// A zero-value in the most-significant byte is used to mark negative numbers.
+const int kNegativeSignMarker = 0;
+
+// Each byte is encoded as MoreBit | ValueBits.
+class MoreBit : public BitField8<bool, 7, 1> {};
+class ValueBits : public BitField8<int, 0, 7> {};
+
+// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
+void AddAndSetEntry(PositionTableEntry& value,
+                    const PositionTableEntry& other) {
+  value.bytecode_offset += other.bytecode_offset;
+  value.source_position += other.source_position;
+  value.is_statement = other.is_statement;
+}
+
+// Helper: Subtract the offsets from 'other' from 'value'.
+void SubtractFromEntry(PositionTableEntry& value,
+                       const PositionTableEntry& other) {
+  value.bytecode_offset -= other.bytecode_offset;
+  value.source_position -= other.source_position;
+}
+
+// Helper: Encode an integer.
+void EncodeInt(ZoneVector<byte>& bytes, int value) {
+  bool sign = false;
+  if (value < 0) {
+    sign = true;
+    value = -value;
+  }
+
+  bool more;
+  do {
+    more = value > ValueBits::kMax;
+    bytes.push_back(MoreBit::encode(more || sign) |
+                    ValueBits::encode(value & ValueBits::kMax));
+    value >>= ValueBits::kSize;
+  } while (more);
+
+  if (sign) {
+    bytes.push_back(MoreBit::encode(false) |
+                    ValueBits::encode(kNegativeSignMarker));
+  }
+}
+
+// Encode a PositionTableEntry.
+void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
+  // 1 bit for sign + is_statement each, which leaves 30b for the value.
+  DCHECK(abs(entry.bytecode_offset) < (1 << 30));
+  EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+  EncodeInt(bytes, entry.source_position);
+}
+
+// Helper: Decode an integer.
+void DecodeInt(ByteArray* bytes, int* index, int* v) {
+  byte current;
+  int n = 0;
+  int value = 0;
+  bool more;
+  do {
+    current = bytes->get((*index)++);
+    value |= ValueBits::decode(current) << (n * ValueBits::kSize);
+    n++;
+    more = MoreBit::decode(current);
+  } while (more);
+
+  if (ValueBits::decode(current) == kNegativeSignMarker) {
+    value = -value;
+  }
+  *v = value;
+}
+
+void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
+  int tmp;
+  DecodeInt(bytes, index, &tmp);
+  entry->is_statement = (tmp & 1);
+
+  // Note that '>>' needs to be arithmetic shift in order to handle negative
+  // numbers properly.
+  entry->bytecode_offset = (tmp >> 1);
+
+  DecodeInt(bytes, index, &entry->source_position);
+}
+
+}  // namespace
 
 void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
                                                       int source_position) {
   int offset = static_cast<int>(bytecode_offset);
-  // If a position has already been assigned to this bytecode offset,
-  // do not reassign a new statement position.
-  if (CodeOffsetHasPosition(offset)) return;
-  uint32_t encoded = IsStatementField::encode(true) |
-                     SourcePositionField::encode(source_position);
-  entries_.push_back({offset, encoded});
+  AddEntry({offset, source_position, true});
 }
 
 void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
                                                        int source_position) {
   int offset = static_cast<int>(bytecode_offset);
-  // If a position has already been assigned to this bytecode offset,
-  // do not reassign a new statement position.
-  if (CodeOffsetHasPosition(offset)) return;
-  uint32_t encoded = IsStatementField::encode(false) |
-                     SourcePositionField::encode(source_position);
-  entries_.push_back({offset, encoded});
+  AddEntry({offset, source_position, false});
 }
 
-void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
-  int offset = static_cast<int>(bytecode_offset);
-  // If we already added a source position table entry, but the bytecode array
-  // builder ended up not outputting a bytecode for the corresponding bytecode
-  // offset, we have to remove that entry.
-  if (CodeOffsetHasPosition(offset)) entries_.pop_back();
-}
-
-Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
-  int length = static_cast<int>(entries_.size());
-  Handle<FixedArray> table =
-      isolate_->factory()->NewFixedArray(length * 2, TENURED);
-  for (int i = 0; i < length; i++) {
-    table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
-    table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
+  // Don't encode a new entry if this bytecode already has a source position
+  // assigned.
+  if (candidate_.bytecode_offset == entry.bytecode_offset) {
+    if (entry.is_statement) candidate_ = entry;
+    return;
   }
+
+  CommitEntry();
+  candidate_ = entry;
+}
+
+void SourcePositionTableBuilder::CommitEntry() {
+  if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
+  PositionTableEntry tmp(candidate_);
+  SubtractFromEntry(tmp, previous_);
+  EncodeEntry(bytes_, tmp);
+  previous_ = candidate_;
+
+  if (candidate_.is_statement) {
+    LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
+                                 jit_handler_data_, candidate_.bytecode_offset,
+                                 candidate_.source_position));
+  }
+  LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
+                               jit_handler_data_, candidate_.bytecode_offset,
+                               candidate_.source_position));
+
+#ifdef ENABLE_SLOW_DCHECKS
+  raw_entries_.push_back(candidate_);
+#endif
+}
+
+Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
+  CommitEntry();
+  if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
+
+  Handle<ByteArray> table = isolate_->factory()->NewByteArray(
+      static_cast<int>(bytes_.size()), TENURED);
+
+  MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
+
+#ifdef ENABLE_SLOW_DCHECKS
+  // Brute force testing: Record all positions and decode
+  // the entire table to verify they are identical.
+  auto raw = raw_entries_.begin();
+  for (SourcePositionTableIterator encoded(*table); !encoded.done();
+       encoded.Advance(), raw++) {
+    DCHECK(raw != raw_entries_.end());
+    DCHECK_EQ(encoded.bytecode_offset(), raw->bytecode_offset);
+    DCHECK_EQ(encoded.source_position(), raw->source_position);
+    DCHECK_EQ(encoded.is_statement(), raw->is_statement);
+  }
+  DCHECK(raw == raw_entries_.end());
+#endif
+
   return table;
 }
 
-SourcePositionTableIterator::SourcePositionTableIterator(
-    BytecodeArray* bytecode_array)
-    : table_(bytecode_array->source_position_table()),
-      index_(0),
-      length_(table_->length()) {
-  DCHECK(table_->length() % 2 == 0);
+SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
+    : table_(byte_array), index_(0), current_() {
   Advance();
 }
 
 void SourcePositionTableIterator::Advance() {
-  if (index_ < length_) {
-    int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
-    // Bytecode offsets are in ascending order.
-    DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
-    bytecode_offset_ = new_bytecode_offset;
-    uint32_t source_position_and_type =
-        static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
-    is_statement_ = IsStatementField::decode(source_position_and_type);
-    source_position_ = SourcePositionField::decode(source_position_and_type);
+  DCHECK(!done());
+  DCHECK(index_ >= 0 && index_ <= table_->length());
+  if (index_ == table_->length()) {
+    index_ = kDone;
+  } else {
+    PositionTableEntry tmp;
+    DecodeEntry(table_, &index_, &tmp);
+    AddAndSetEntry(current_, tmp);
   }
-  index_ += 2;
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
index 336cf42..3ac58d6 100644
--- a/src/interpreter/source-position-table.h
+++ b/src/interpreter/source-position-table.h
@@ -6,72 +6,90 @@
 #define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
 
 #include "src/assert-scope.h"
+#include "src/checks.h"
 #include "src/handles.h"
-#include "src/zone.h"
+#include "src/log.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 
 class BytecodeArray;
-class FixedArray;
+class ByteArray;
 class Isolate;
+class Zone;
 
 namespace interpreter {
 
-class SourcePositionTableBuilder {
+struct PositionTableEntry {
+  PositionTableEntry()
+      : bytecode_offset(0), source_position(0), is_statement(false) {}
+  PositionTableEntry(int bytecode, int source, bool statement)
+      : bytecode_offset(bytecode),
+        source_position(source),
+        is_statement(statement) {}
+
+  int bytecode_offset;
+  int source_position;
+  bool is_statement;
+};
+
+class SourcePositionTableBuilder : public PositionsRecorder {
  public:
-  explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
-      : isolate_(isolate), entries_(zone) {}
+  SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+      : isolate_(isolate),
+        bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+        raw_entries_(zone),
+#endif
+        candidate_(kUninitializedCandidateOffset, 0, false) {
+  }
 
   void AddStatementPosition(size_t bytecode_offset, int source_position);
   void AddExpressionPosition(size_t bytecode_offset, int source_position);
-  void RevertPosition(size_t bytecode_offset);
-  Handle<FixedArray> ToFixedArray();
+  Handle<ByteArray> ToSourcePositionTable();
 
  private:
-  struct Entry {
-    int bytecode_offset;
-    uint32_t source_position_and_type;
-  };
+  static const int kUninitializedCandidateOffset = -1;
 
-  bool CodeOffsetHasPosition(int bytecode_offset) {
-    // Return whether bytecode offset already has a position assigned.
-    return entries_.size() > 0 &&
-           entries_.back().bytecode_offset == bytecode_offset;
-  }
+  void AddEntry(const PositionTableEntry& entry);
+  void CommitEntry();
 
   Isolate* isolate_;
-  ZoneVector<Entry> entries_;
+  ZoneVector<byte> bytes_;
+#ifdef ENABLE_SLOW_DCHECKS
+  ZoneVector<PositionTableEntry> raw_entries_;
+#endif
+  PositionTableEntry candidate_;  // Next entry to be written, if initialized.
+  PositionTableEntry previous_;   // Previously written entry, to compute delta.
 };
 
 class SourcePositionTableIterator {
  public:
-  explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+  explicit SourcePositionTableIterator(ByteArray* byte_array);
 
   void Advance();
 
   int bytecode_offset() const {
     DCHECK(!done());
-    return bytecode_offset_;
+    return current_.bytecode_offset;
   }
   int source_position() const {
     DCHECK(!done());
-    return source_position_;
+    return current_.source_position;
   }
   bool is_statement() const {
     DCHECK(!done());
-    return is_statement_;
+    return current_.is_statement;
   }
-  bool done() const { return index_ > length_; }
+  bool done() const { return index_ == kDone; }
 
  private:
-  FixedArray* table_;
+  static const int kDone = -1;
+
+  ByteArray* table_;
   int index_;
-  int length_;
-  bool is_statement_;
-  int bytecode_offset_;
-  int source_position_;
+  PositionTableEntry current_;
   DisallowHeapAllocation no_gc;
 };