Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 440e879..2663e4a 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -22,12 +22,16 @@
 using compiler::Node;
 
 InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
-                                           Bytecode bytecode)
-    : compiler::CodeStubAssembler(
-          isolate, zone, InterpreterDispatchDescriptor(isolate),
-          Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+                                           Bytecode bytecode,
+                                           OperandScale operand_scale)
+    : compiler::CodeStubAssembler(isolate, zone,
+                                  InterpreterDispatchDescriptor(isolate),
+                                  Code::ComputeFlags(Code::BYTECODE_HANDLER),
+                                  Bytecodes::ToString(bytecode), 0),
       bytecode_(bytecode),
+      operand_scale_(operand_scale),
       accumulator_(this, MachineRepresentation::kTagged),
+      accumulator_use_(AccumulatorUse::kNone),
       context_(this, MachineRepresentation::kTagged),
       bytecode_array_(this, MachineRepresentation::kTagged),
       disable_stack_check_across_call_(false),
@@ -42,11 +46,26 @@
   }
 }
 
-InterpreterAssembler::~InterpreterAssembler() {}
+InterpreterAssembler::~InterpreterAssembler() {
+  // If the following check fails, the handler does not use the
+  // accumulator in the way described in the bytecode definitions in
+  // bytecodes.h.
+  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+}
 
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+Node* InterpreterAssembler::GetAccumulatorUnchecked() {
+  return accumulator_.value();
+}
+
+Node* InterpreterAssembler::GetAccumulator() {
+  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
+  return GetAccumulatorUnchecked();
+}
 
 void InterpreterAssembler::SetAccumulator(Node* value) {
+  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
+  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
   accumulator_.Bind(value);
 }
 
@@ -79,11 +98,11 @@
 
 Node* InterpreterAssembler::LoadRegister(int offset) {
   return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
-              Int32Constant(offset));
+              IntPtrConstant(offset));
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+  return LoadRegister(IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -97,12 +116,12 @@
 
 Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(), Int32Constant(offset),
+                             RegisterFileRawPointer(), IntPtrConstant(offset),
                              value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
-  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+  return StoreRegister(value, IntPtrConstant(-reg.index()));
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
@@ -113,27 +132,31 @@
 
 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
+  return IntPtrAdd(reg_index, IntPtrConstant(-1));
 }
 
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  return Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+Node* InterpreterAssembler::OperandOffset(int operand_index) {
+  return IntPtrConstant(
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
 }
 
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kByte,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  Node* load = Load(
-      MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                      bytecode_, operand_index))));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+              IntPtrAdd(BytecodeOffset(), operand_offset));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  Node* operand_offset = OperandOffset(operand_index);
+  Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+                    IntPtrAdd(BytecodeOffset(), operand_offset));
+
   // Ensure that we sign extend to full pointer size
   if (kPointerSize == 8) {
     load = ChangeInt32ToInt64(load);
@@ -141,58 +164,85 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  if (TargetSupportsUnalignedAccess()) {
-    return Load(
-        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
-                                        bytecode_, operand_index))));
-  } else {
-    int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
-    Node* second_byte =
-        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+    int relative_offset, MachineType result_type) {
+  static const int kMaxCount = 4;
+  DCHECK(!TargetSupportsUnalignedAccess());
+
+  int count;
+  switch (result_type.representation()) {
+    case MachineRepresentation::kWord16:
+      count = 2;
+      break;
+    case MachineRepresentation::kWord32:
+      count = 4;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  MachineType msb_type =
+      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
+
 #if V8_TARGET_LITTLE_ENDIAN
-    return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+  const int kStep = -1;
+  int msb_offset = count - 1;
 #elif V8_TARGET_BIG_ENDIAN
-    return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+  const int kStep = 1;
+  int msb_offset = 0;
 #else
 #error "Unknown Architecture"
 #endif
+
+  // Read the most significant byte into bytes[0] and then in order
+  // down to the least significant in bytes[count - 1].
+  DCHECK(count <= kMaxCount);
+  compiler::Node* bytes[kMaxCount];
+  for (int i = 0; i < count; i++) {
+    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
+    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
+    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
+    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
+  }
+
+  // Pack LSB to MSB.
+  Node* result = bytes[--count];
+  for (int i = 1; --count >= 0; i++) {
+    Node* shift = Int32Constant(i * kBitsPerByte);
+    Node* value = Word32Shl(bytes[count], shift);
+    result = Word32Or(value, result);
+  }
+  return result;
+}
+
+Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
   }
 }
 
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
-    int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(OperandSize::kShort,
-            Bytecodes::GetOperandSize(bytecode_, operand_index));
-  int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+  DCHECK_EQ(
+      OperandSize::kShort,
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
   Node* load;
   if (TargetSupportsUnalignedAccess()) {
     load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
-#if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
-    Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), hi_byte_offset));
-    Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                         IntPtrAdd(BytecodeOffset(), lo_byte_offset));
-    hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
-    load = Word32Or(hi_byte, lo_byte);
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
   }
 
   // Ensure that we sign extend to full pointer size
@@ -202,57 +252,123 @@
   return load;
 }
 
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+  }
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
+                                    bytecode_, operand_index, operand_scale()));
+  int operand_offset =
+      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
+  Node* load;
+  if (TargetSupportsUnalignedAccess()) {
+    load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+  } else {
+    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+  }
+
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+                                                  OperandSize operand_size) {
+  DCHECK(!Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
     case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kRegCount8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
+      return BytecodeOperandSignedByte(operand_index);
     case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kRegCount16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
+      return BytecodeOperandSignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandSignedQuad(operand_index);
     case OperandSize::kNone:
       UNREACHABLE();
   }
   return nullptr;
 }
 
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+                                                    OperandSize operand_size) {
+  DCHECK(Bytecodes::IsUnsignedOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  switch (operand_size) {
+    case OperandSize::kByte:
+      return BytecodeOperandUnsignedByte(operand_index);
+    case OperandSize::kShort:
+      return BytecodeOperandUnsignedShort(operand_index);
+    case OperandSize::kQuad:
+      return BytecodeOperandUnsignedQuad(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+  DCHECK_EQ(OperandType::kRegCount,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
+  DCHECK_EQ(OperandType::kFlag8,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kByte);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
-  DCHECK_EQ(OperandType::kImm8,
+  DCHECK_EQ(OperandType::kImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
-  return BytecodeOperandSignExtended(operand_index);
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
-  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case OperandSize::kByte:
-      DCHECK_EQ(OperandType::kIdx8,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case OperandSize::kShort:
-      DCHECK_EQ(OperandType::kIdx16,
-                Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return nullptr;
+  DCHECK(OperandType::kIdx ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(bytecode_, operand_index);
-  if (Bytecodes::IsRegisterOperandType(operand_type)) {
-    OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
-    if (operand_size == OperandSize::kByte) {
-      return BytecodeOperandSignExtended(operand_index);
-    } else if (operand_size == OperandSize::kShort) {
-      return BytecodeOperandShortSignExtended(operand_index);
-    }
-  }
-  UNREACHABLE();
-  return nullptr;
+  DCHECK(Bytecodes::IsRegisterOperandType(
+      Bytecodes::GetOperandType(bytecode_, operand_index)));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  return BytecodeSignedOperand(operand_index, operand_size);
+}
+
+Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
+  DCHECK(OperandType::kRuntimeId ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kShort);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
@@ -264,14 +380,6 @@
   return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
 }
 
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
-}
-
 Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
   return Load(MachineType::AnyTagged(), object,
               IntPtrConstant(offset - kHeapObjectTag));
@@ -285,7 +393,7 @@
 Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Load(MachineType::AnyTagged(), context, offset);
 }
 
@@ -293,7 +401,7 @@
                                              Node* value) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Store(MachineRepresentation::kTagged, context, offset, value);
 }
 
@@ -311,8 +419,6 @@
 void InterpreterAssembler::CallPrologue() {
   StoreRegister(SmiTag(BytecodeOffset()),
                 InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-  StoreRegister(BytecodeArrayTaggedPointer(),
-                InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
 
   if (FLAG_debug_code && !disable_stack_check_across_call_) {
     DCHECK(stack_pointer_before_call_ == nullptr);
@@ -368,7 +474,7 @@
   Node* function = IntPtrAdd(function_table, function_offset);
   Node* function_entry =
       Load(MachineType::Pointer(), function,
-           Int32Constant(offsetof(Runtime::Function, entry)));
+           IntPtrConstant(offsetof(Runtime::Function, entry)));
 
   return CallStub(callable.descriptor(), code_target, context, arg_count,
                   first_arg, function_entry, result_size);
@@ -405,7 +511,7 @@
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+  return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
 }
 
 Node* InterpreterAssembler::Advance(Node* delta) {
@@ -438,18 +544,21 @@
 }
 
 void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+  DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
 }
 
 void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
   Node* target_bytecode = Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+  if (kPointerSize == 8) {
+    target_bytecode = ChangeUint32ToUint64(target_bytecode);
+  }
 
   // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
   // from code object on every dispatch.
   Node* target_code_object =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
-           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
 
   DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
 }
@@ -461,12 +570,46 @@
   }
 
   InterpreterDispatchDescriptor descriptor(isolate());
-  Node* args[] = {GetAccumulator(),          RegisterFileRawPointer(),
+  Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
                   bytecode_offset,           BytecodeArrayTaggedPointer(),
                   DispatchTableRawPointer(), GetContext()};
   TailCall(descriptor, handler, args, 0);
 }
 
+void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
+  // Dispatching a wide bytecode requires treating the prefix
+  // bytecode as a base pointer into the dispatch table and dispatching
+  // the bytecode that follows relative to this base.
+  //
+  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
+  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
+  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
+  Node* next_bytecode_offset = Advance(1);
+  Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                             next_bytecode_offset);
+  if (kPointerSize == 8) {
+    next_bytecode = ChangeUint32ToUint64(next_bytecode);
+  }
+  Node* base_index;
+  switch (operand_scale) {
+    case OperandScale::kDouble:
+      base_index = IntPtrConstant(1 << kBitsPerByte);
+      break;
+    case OperandScale::kQuadruple:
+      base_index = IntPtrConstant(2 << kBitsPerByte);
+      break;
+    default:
+      UNREACHABLE();
+      base_index = nullptr;
+  }
+  Node* target_index = IntPtrAdd(base_index, next_bytecode);
+  Node* target_code_object =
+      Load(MachineType::Pointer(), DispatchTableRawPointer(),
+           WordShl(target_index, kPointerSizeLog2));
+
+  DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+}
+
 void InterpreterAssembler::InterpreterReturn() {
   // TODO(rmcilroy): Investigate whether it is worth supporting self
   // optimization of primitive functions like FullCodegen.
@@ -505,27 +648,29 @@
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
   disable_stack_check_across_call_ = true;
   Node* abort_id = SmiTag(Int32Constant(bailout_reason));
-  Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
   disable_stack_check_across_call_ = false;
-  // Unreached, but keeps turbofan happy.
-  Return(ret_value);
 }
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
   CodeStubAssembler::Label match(this);
   CodeStubAssembler::Label no_match(this);
+  CodeStubAssembler::Label end(this);
 
   Node* condition = WordEqual(lhs, rhs);
   Branch(condition, &match, &no_match);
   Bind(&no_match);
   Abort(bailout_reason);
+  Goto(&end);
   Bind(&match);
+  Goto(&end);
+  Bind(&end);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
-              SmiTag(BytecodeOffset()), GetAccumulator());
+              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
 }
 
 // static
@@ -534,7 +679,8 @@
   return false;
 #elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
   return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
+    V8_TARGET_ARCH_S390
   return true;
 #else
 #error "Unknown Architecture"