Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and properly updates the x64 makefile so that the x64 build does
not break.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
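
For context when reviewing the diff below: the core change is that the
interpreter now keeps its bytecode handlers in a raw, fixed-size
dispatch table owned by the Interpreter object (walked by the GC via
IterateDispatchTable) instead of a heap-allocated FixedArray. A minimal
sketch of that scheme follows; the names and types are simplified
stand-ins for illustration, not V8's actual declarations.

  // Illustrative sketch only, assuming 256 byte-wide bytecodes.
  #include <cstring>

  class Code;  // stand-in for v8::internal::Code

  struct DispatchTableSketch {
    static const int kDispatchTableSize = 256;
    Code* dispatch_table_[kDispatchTableSize];

    DispatchTableSketch() {
      // Zeroed up front so "slot 0 != nullptr" can signal that the
      // table has already been populated with handler code objects.
      std::memset(dispatch_table_, 0, sizeof(dispatch_table_));
    }
    bool IsInitialized() const { return dispatch_table_[0] != nullptr; }
    Code* GetHandler(unsigned char bytecode) const {
      // Direct indexed lookup; no handle/FixedArray indirection.
      return dispatch_table_[bytecode];
    }
  };

The remainder of the diff is largely mechanical: InterpreterAssembler
moves from the compiler namespace into src/interpreter, bytecode
handlers pass the context explicitly to CallStub/CallRuntime/CallJS,
and the sloppy/strict load variants collapse into single LdaGlobal,
LoadIC and KeyedLoadIC handlers.
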
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 574602b..eb88342 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -4,12 +4,13 @@
 
 #include "src/interpreter/interpreter.h"
 
+#include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
 #include "src/compiler.h"
-#include "src/compiler/interpreter-assembler.h"
 #include "src/factory.h"
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -20,52 +21,77 @@
 
 #define __ assembler->
 
-
-Interpreter::Interpreter(Isolate* isolate)
-    : isolate_(isolate) {}
-
-
-// static
-Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
-    Isolate* isolate) {
-  Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
-      static_cast<int>(Bytecode::kLast) + 1, TENURED);
-  // We rely on the interpreter handler table being immovable, so check that
-  // it was allocated on the first page (which is always immovable).
-  DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
-      handler_table->address()));
-  return handler_table;
+Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+  memset(&dispatch_table_, 0, sizeof(dispatch_table_));
 }
 
-
 void Interpreter::Initialize() {
   DCHECK(FLAG_ignition);
-  Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
-  if (!IsInterpreterTableInitialized(handler_table)) {
-    Zone zone;
-    HandleScope scope(isolate_);
+  if (IsDispatchTableInitialized()) return;
+  Zone zone;
+  HandleScope scope(isolate_);
 
-#define GENERATE_CODE(Name, ...)                                      \
-    {                                                                 \
-      compiler::InterpreterAssembler assembler(isolate_, &zone,       \
-                                               Bytecode::k##Name);    \
-      Do##Name(&assembler);                                           \
-      Handle<Code> code = assembler.GenerateCode();                   \
-      handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
-    }
-    BYTECODE_LIST(GENERATE_CODE)
-#undef GENERATE_CODE
+#define GENERATE_CODE(Name, ...)                                        \
+  {                                                                     \
+    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
+    Do##Name(&assembler);                                               \
+    Handle<Code> code = assembler.GenerateCode();                       \
+    TraceCodegen(code, #Name);                                          \
+    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;      \
   }
+  BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
 }
 
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+  DCHECK(IsDispatchTableInitialized());
+  return dispatch_table_[Bytecodes::ToByte(bytecode)];
+}
+
+void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+  v->VisitPointers(
+      reinterpret_cast<Object**>(&dispatch_table_[0]),
+      reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+}
+
+// static
+int Interpreter::InterruptBudget() {
+  // TODO(ignition): Tune code size multiplier.
+  const int kCodeSizeMultiplier = 32;
+  return FLAG_interrupt_budget * kCodeSizeMultiplier;
+}
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+    OFStream os(stdout);
+    base::SmartArrayPointer<char> name = info->GetDebugName();
+    os << "[generating bytecode for function: " << info->GetDebugName().get()
+       << "]" << std::endl
+       << std::flush;
+  }
+
+#ifdef DEBUG
+  if (info->parse_info() && FLAG_print_source) {
+    OFStream os(stdout);
+    os << "--- Source from AST ---" << std::endl
+       << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
+       << std::endl
+       << std::flush;
+  }
+
+  if (info->parse_info() && FLAG_print_ast) {
+    OFStream os(stdout);
+    os << "--- AST ---" << std::endl
+       << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
+       << std::flush;
+  }
+#endif  // DEBUG
+
   BytecodeGenerator generator(info->isolate(), info->zone());
   info->EnsureFeedbackVector();
   Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
-    os << "Function: " << info->GetDebugName().get() << std::endl;
     bytecodes->Print(os);
     os << std::flush;
   }
@@ -75,18 +101,28 @@
   return true;
 }
 
-
-bool Interpreter::IsInterpreterTableInitialized(
-    Handle<FixedArray> handler_table) {
-  DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
-  return handler_table->get(0) != isolate_->heap()->undefined_value();
+bool Interpreter::IsDispatchTableInitialized() {
+  if (FLAG_trace_ignition) {
+    // Regenerate table to add bytecode tracing operations.
+    return false;
+  }
+  return dispatch_table_[0] != nullptr;
 }
 
+void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_trace_ignition_codegen) {
+    OFStream os(stdout);
+    code->Disassemble(name, os);
+    os << std::flush;
+  }
+#endif  // ENABLE_DISASSEMBLER
+}
 
 // LdaZero
 //
 // Load literal '0' into the accumulator.
-void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
   Node* zero_value = __ NumberConstant(0.0);
   __ SetAccumulator(zero_value);
   __ Dispatch();
@@ -96,15 +132,14 @@
 // LdaSmi8 <imm8>
 //
 // Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
   Node* raw_int = __ BytecodeOperandImm(0);
   Node* smi_int = __ SmiTag(raw_int);
   __ SetAccumulator(smi_int);
   __ Dispatch();
 }
 
-
-void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   __ SetAccumulator(constant);
@@ -115,7 +150,7 @@
 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -123,7 +158,7 @@
 // LdaConstantWide <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -131,7 +166,7 @@
 // LdaUndefined
 //
 // Load Undefined into the accumulator.
-void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
   __ SetAccumulator(undefined_value);
@@ -142,7 +177,7 @@
 // LdaNull
 //
 // Load Null into the accumulator.
-void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   __ SetAccumulator(null_value);
   __ Dispatch();
@@ -152,7 +187,7 @@
 // LdaTheHole
 //
 // Load TheHole into the accumulator.
-void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
   __ SetAccumulator(the_hole_value);
   __ Dispatch();
@@ -162,7 +197,7 @@
 // LdaTrue
 //
 // Load True into the accumulator.
-void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
   Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
   __ SetAccumulator(true_value);
   __ Dispatch();
@@ -172,7 +207,7 @@
 // LdaFalse
 //
 // Load False into the accumulator.
-void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
   Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
   __ SetAccumulator(false_value);
   __ Dispatch();
@@ -182,7 +217,7 @@
 // Ldar <src>
 //
 // Load accumulator with value from register <src>.
-void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* value = __ LoadRegister(reg_index);
   __ SetAccumulator(value);
@@ -193,7 +228,7 @@
 // Star <dst>
 //
 // Store accumulator to register <dst>.
-void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* accumulator = __ GetAccumulator();
   __ StoreRegister(accumulator, reg_index);
@@ -201,32 +236,10 @@
 }
 
 
-// Exchange <reg8> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
-  Node* reg0_index = __ BytecodeOperandReg(0);
-  Node* reg1_index = __ BytecodeOperandReg(1);
-  Node* reg0_value = __ LoadRegister(reg0_index);
-  Node* reg1_value = __ LoadRegister(reg1_index);
-  __ StoreRegister(reg1_value, reg0_index);
-  __ StoreRegister(reg0_value, reg1_index);
-  __ Dispatch();
-}
-
-
-// ExchangeWide <reg16> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
-  return DoExchange(assembler);
-}
-
-
 // Mov <src> <dst>
 //
 // Stores the value of register <src> to register <dst>.
-void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMov(InterpreterAssembler* assembler) {
   Node* src_index = __ BytecodeOperandReg(0);
   Node* src_value = __ LoadRegister(src_index);
   Node* dst_index = __ BytecodeOperandReg(1);
@@ -235,8 +248,14 @@
 }
 
 
-void Interpreter::DoLoadGlobal(Callable ic,
-                               compiler::InterpreterAssembler* assembler) {
+// MovWide <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
+  DoMov(assembler);
+}
+
+void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -250,109 +269,54 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
-                           type_feedback_vector);
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobal <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedBackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedBackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+// LdaGlobalWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+// LdaGlobalInsideTypeofWide <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedBackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
 
-// LdaGlobalInsideTypeofStrict <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-void Interpreter::DoStoreGlobal(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -367,8 +331,8 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
-            type_feedback_vector);
+  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+              smi_slot, type_feedback_vector);
 
   __ Dispatch();
 }
@@ -378,7 +342,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -389,7 +353,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -400,8 +364,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -412,8 +375,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -423,7 +385,7 @@
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   Node* slot_index = __ BytecodeOperandIdx(1);
@@ -436,8 +398,7 @@
 // LdaContextSlotWide <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
   DoLdaContextSlot(assembler);
 }
 
@@ -445,7 +406,7 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
@@ -458,19 +419,16 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
   DoStaContextSlot(assembler);
 }
 
-
 void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
-                                   compiler::InterpreterAssembler* assembler) {
+                                   InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* result_pair = __ CallRuntime(function_id, context, name);
-  Node* result = __ Projection(0, result_pair);
+  Node* result = __ CallRuntime(function_id, context, name);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -480,7 +438,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
   DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
@@ -489,9 +447,8 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeof(
-    compiler::InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
+  DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
 
@@ -499,8 +456,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
   DoLdaLookupSlot(assembler);
 }
 
@@ -510,20 +466,20 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
 void Interpreter::DoLdaLookupSlotInsideTypeofWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoLdaLookupSlotInsideTypeof(assembler);
 }
 
-
 void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
-                                    compiler::InterpreterAssembler* assembler) {
+                                    InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* language_mode_node = __ NumberConstant(language_mode);
-  Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
-                                language_mode_node);
+  Node* result = __ CallRuntime(is_strict(language_mode)
+                                    ? Runtime::kStoreLookupSlot_Strict
+                                    : Runtime::kStoreLookupSlot_Sloppy,
+                                context, name, value);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -533,8 +489,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
 }
 
@@ -543,8 +498,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::STRICT, assembler);
 }
 
@@ -553,8 +507,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotSloppy(assembler);
 }
 
@@ -563,14 +516,11 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotStrict(assembler);
 }
 
-
-void Interpreter::DoLoadIC(Callable ic,
-                           compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -579,61 +529,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LoadICSloppy <object> <name_index> <slot>
+// LoadIC <object> <name_index> <slot>
 //
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+// LoadICWide <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
   DoLoadIC(ic, assembler);
 }
 
 
-// LoadICStrict <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICStrictWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoKeyedLoadIC(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -641,63 +565,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// KeyedLoadICSloppy <object> <slot>
+// KeyedLoadIC <object> <slot>
 //
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
   Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+// KeyedLoadICWide <object> <slot>
+//
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
   DoKeyedLoadIC(ic, assembler);
 }
 
 
-// KeyedLoadICStrict <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICSloppyWide <object> <slot>
-//
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICStrictWide <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoStoreIC(Callable ic,
-                            compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -707,8 +603,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -718,7 +615,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -730,7 +627,7 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -742,8 +639,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -755,16 +651,13 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
 }
 
-
-void Interpreter::DoKeyedStoreIC(Callable ic,
-                                 compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -774,8 +667,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -784,8 +678,7 @@
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -796,8 +689,7 @@
 //
 // Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -808,8 +700,7 @@
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -820,22 +711,22 @@
 //
 // Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
 }
 
-
 // PushContext <context>
 //
-// Pushes the accumulator as the current context, and saves it in <context>
-void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+// Saves the current context in <context>, and pushes the accumulator as the
+// new current context.
+void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ GetAccumulator();
-  __ SetContext(context);
-  __ StoreRegister(context, reg_index);
+  Node* new_context = __ GetAccumulator();
+  Node* old_context = __ GetContext();
+  __ StoreRegister(old_context, reg_index);
+  __ SetContext(new_context);
   __ Dispatch();
 }
 
@@ -843,22 +734,22 @@
 // PopContext <context>
 //
 // Pops the current context and sets <context> as the new context.
-void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   __ SetContext(context);
   __ Dispatch();
 }
 
-
 void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
-                             compiler::InterpreterAssembler* assembler) {
+                             InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
   // operations, instead of calling builtins directly.
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* lhs = __ LoadRegister(reg_index);
   Node* rhs = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, lhs, rhs);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, lhs, rhs);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -867,7 +758,7 @@
 // Add <src>
 //
 // Add register <src> to accumulator.
-void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoAdd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kAdd, assembler);
 }
 
@@ -875,7 +766,7 @@
 // Sub <src>
 //
 // Subtract register <src> from accumulator.
-void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoSub(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kSubtract, assembler);
 }
 
@@ -883,7 +774,7 @@
 // Mul <src>
 //
 // Multiply accumulator by register <src>.
-void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMul(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kMultiply, assembler);
 }
 
@@ -891,7 +782,7 @@
 // Div <src>
 //
 // Divide register <src> by accumulator.
-void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDiv(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kDivide, assembler);
 }
 
@@ -899,7 +790,7 @@
 // Mod <src>
 //
 // Modulo register <src> by accumulator.
-void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMod(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kModulus, assembler);
 }
 
@@ -907,7 +798,7 @@
 // BitwiseOr <src>
 //
 // BitwiseOr register <src> to accumulator.
-void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseOr, assembler);
 }
 
@@ -915,7 +806,7 @@
 // BitwiseXor <src>
 //
 // BitwiseXor register <src> to accumulator.
-void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseXor, assembler);
 }
 
@@ -923,7 +814,7 @@
 // BitwiseAnd <src>
 //
 // BitwiseAnd register <src> to accumulator.
-void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseAnd, assembler);
 }
 
@@ -934,7 +825,7 @@
 // Register <src> is converted to an int32 and the accumulator to uint32
 // before the operation. 5 lsb bits from the accumulator are used as count
 // i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftLeft, assembler);
 }
 
@@ -945,7 +836,7 @@
 // Result is sign extended. Register <src> is converted to an int32 and the
 // accumulator to uint32 before the operation. 5 lsb bits from the accumulator
 // are used as count i.e. <src> >> (accumulator & 0x1F).
-void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRight, assembler);
 }
 
@@ -956,17 +847,16 @@
 // Result is zero-filled. The accumulator and register <src> are converted to
 // uint32 before the operation 5 lsb bits from the accumulator are used as
 // count i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftRightLogical(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRightLogical, assembler);
 }
 
-
 void Interpreter::DoCountOp(Runtime::FunctionId function_id,
-                            compiler::InterpreterAssembler* assembler) {
+                            InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* one = __ NumberConstant(1);
-  Node* result = __ CallRuntime(function_id, value, one);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, value, one);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -975,7 +865,7 @@
 // Inc
 //
 // Increments value in the accumulator by one.
-void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoInc(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kAdd, assembler);
 }
 
@@ -983,7 +873,7 @@
 // Dec
 //
 // Decrements value in the accumulator by one.
-void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDec(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kSubtract, assembler);
 }
 
@@ -992,9 +882,11 @@
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1004,20 +896,22 @@
 //
 // Load the accumulator with the string representating type of the
 // object in the accumulator.
-void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
-                           compiler::InterpreterAssembler* assembler) {
+                           InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
   Node* key = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, object, key);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, object, key);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1027,8 +921,7 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following strict mode semantics.
-void Interpreter::DoDeletePropertyStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
 }
 
@@ -1037,34 +930,23 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following sloppy mode semantics.
-void Interpreter::DoDeletePropertySloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
 }
 
-
-// DeleteLookupSlot
-//
-// Delete the variable with the name specified in the accumulator by dynamically
-// looking it up.
-void Interpreter::DoDeleteLookupSlot(
-    compiler::InterpreterAssembler* assembler) {
-  Node* name = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-
-void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJSCall(InterpreterAssembler* assembler,
+                           TailCallMode tail_call_mode) {
   Node* function_reg = __ BytecodeOperandReg(0);
   Node* function = __ LoadRegister(function_reg);
   Node* receiver_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+  Node* context = __ GetContext();
+  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
+  Node* result =
+      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1074,8 +956,8 @@
 //
 // Call a JSfunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
 
@@ -1083,8 +965,35 @@
 //
 // Call a JSfunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
+// TailCall <callable> <receiver> <arg_count>
+//
+// Tail call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+// TailCallWide <callable> <receiver> <arg_count>
+//
+// Tail call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 
@@ -1093,13 +1002,37 @@
 // Call the runtime function |function_id| with the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
 // registers.
-void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
+  // Call the runtime function.
   Node* function_id = __ BytecodeOperandIdx(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
-  Node* result = __ CallRuntime(function_id, first_arg, args_count);
-  __ SetAccumulator(result);
+  Node* context = __ GetContext();
+  Node* result_pair =
+      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
+
+  // Store the results in <first_return> and <first_return + 1>
+  Node* first_return_reg = __ BytecodeOperandReg(3);
+  Node* second_return_reg = __ NextRegister(first_return_reg);
+  Node* result0 = __ Projection(0, result_pair);
+  Node* result1 = __ Projection(1, result_pair);
+  __ StoreRegister(result0, first_return_reg);
+  __ StoreRegister(result1, second_return_reg);
   __ Dispatch();
 }
 
@@ -1110,36 +1043,28 @@
 // first argument in register |first_arg| and |arg_count| arguments in
 // subsequent registers. Returns the result in <first_return> and
 // <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(
-    compiler::InterpreterAssembler* assembler) {
-  // Call the runtime function.
-  Node* function_id = __ BytecodeOperandIdx(0);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
-
-  // Store the results in <first_return> and <first_return + 1>
-  Node* first_return_reg = __ BytecodeOperandReg(3);
-  Node* second_return_reg = __ NextRegister(first_return_reg);
-  Node* result0 = __ Projection(0, result_pair);
-  Node* result1 = __ Projection(1, result_pair);
-  __ StoreRegister(result0, first_return_reg);
-  __ StoreRegister(result1, second_return_reg);
-
-  __ Dispatch();
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
 }
 
 
-// CallJSRuntime <context_index> <receiver> <arg_count>
+// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
 //
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
+}
+
+void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
 
   // Get the function to call from the native context.
   Node* context = __ GetContext();
@@ -1148,7 +1073,41 @@
   Node* function = __ LoadContextSlot(native_context, context_index);
 
   // Call the function.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* result = __ CallJS(function, context, first_arg, args_count,
+                           TailCallMode::kDisallow);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+
+// CallJSRuntimeWide <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+  Node* new_target = __ GetAccumulator();
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallConstruct(constructor, context, new_target, first_arg, args_count);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1158,42 +1117,45 @@
 //
 // Call operator new with |constructor| and the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
 //
-void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
-  Node* constructor_reg = __ BytecodeOperandReg(0);
-  Node* constructor = __ LoadRegister(constructor_reg);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result =
-      __ CallConstruct(constructor, constructor, first_arg, args_count);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoNew(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
+}
+
+
+// NewWide <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
 }
 
 
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
-void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterEquals, assembler);
+void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kEqual, assembler);
 }
 
 
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
-void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
+void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kNotEqual, assembler);
 }
 
 
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
-void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
+void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictEqual, assembler);
 }
 
 
@@ -1201,25 +1163,24 @@
 //
 // Test if the value in the <src> register is not strictly equal to the
 // accumulator.
-void Interpreter::DoTestNotEqualStrict(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
+void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictNotEqual, assembler);
 }
 
 
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
-void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
+void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThan, assembler);
 }
 
 
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
-void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
+void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThan, assembler);
 }
 
 
@@ -1227,9 +1188,8 @@
 //
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
-void Interpreter::DoTestLessThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
+void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
 }
 
 
@@ -1237,9 +1197,8 @@
 //
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
-void Interpreter::DoTestGreaterThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
+void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
 }
 
 
@@ -1247,7 +1206,7 @@
 //
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
-void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kHasProperty, assembler);
 }
 
@@ -1256,7 +1215,7 @@
 //
 // Test if the object referenced by the <src> register is an instance of type
 // referenced by the accumulator.
-void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kInstanceOf, assembler);
 }
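
The Test* handlers above and below all funnel into DoBinaryOp, which is defined earlier in interpreter.cc and is not part of this hunk; each handler differs only in the Runtime function id it passes along. A standalone toy sketch of that delegation pattern (illustrative only, not the real helper; in the actual code the lhs comes from register operand 0, the rhs from the accumulator, and the runtime result is written back to the accumulator before dispatching):

    #include <cstdio>

    // Stand-ins for the Runtime::k* comparison ids used above.
    enum class RuntimeId { kEqual, kStrictEqual, kLessThan, kGreaterThan };

    struct Frame {
      double registers[8] = {0};
      double accumulator = 0;
    };

    // Toy replacement for the runtime comparison functions.
    double CallRuntime(RuntimeId id, double lhs, double rhs) {
      switch (id) {
        case RuntimeId::kEqual:
        case RuntimeId::kStrictEqual: return lhs == rhs;
        case RuntimeId::kLessThan:    return lhs < rhs;
        case RuntimeId::kGreaterThan: return lhs > rhs;
      }
      return 0;
    }

    // Shared helper: lhs from the register operand, rhs from the accumulator,
    // result back into the accumulator (the role DoBinaryOp plays above).
    void DoBinaryOp(RuntimeId id, Frame& f, int src_reg) {
      f.accumulator = CallRuntime(id, f.registers[src_reg], f.accumulator);
    }

    // Each Test* handler only picks the runtime function id.
    void DoTestLessThan(Frame& f, int src_reg) {
      DoBinaryOp(RuntimeId::kLessThan, f, src_reg);
    }

    int main() {
      Frame f;
      f.registers[0] = 2;
      f.accumulator = 5;
      DoTestLessThan(f, 0);                // 2 < 5
      std::printf("%g\n", f.accumulator);  // prints 1
    }
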
 
@@ -1264,9 +1223,10 @@
 // ToName
 //
 // Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1275,9 +1235,10 @@
 // ToNumber
 //
 // Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1286,9 +1247,10 @@
 // ToObject
 //
 // Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1297,7 +1259,7 @@
 // Jump <imm8>
 //
 // Jump by number of bytes represented by the immediate operand |imm8|.
-void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJump(InterpreterAssembler* assembler) {
   Node* relative_jump = __ BytecodeOperandImm(0);
   __ Jump(relative_jump);
 }
@@ -1306,7 +1268,7 @@
 // JumpConstant <idx8>
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
-void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1318,8 +1280,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the
 // constant pool.
-void Interpreter::DoJumpConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
   DoJumpConstant(assembler);
 }
 
@@ -1328,7 +1289,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
-void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
@@ -1340,8 +1301,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1355,8 +1315,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfTrueConstant(assembler);
 }
 
@@ -1365,7 +1324,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
-void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
@@ -1377,8 +1336,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1392,8 +1350,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfFalseConstant(assembler);
 }
 
@@ -1402,11 +1359,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanTrue(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1419,10 +1376,11 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1437,7 +1395,7 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanTrueConstant(assembler);
 }
 
@@ -1446,11 +1404,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanFalse(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1463,10 +1421,11 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1481,7 +1440,7 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanFalseConstant(assembler);
 }
 
@@ -1490,7 +1449,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* relative_jump = __ BytecodeOperandImm(0);
@@ -1502,8 +1461,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* index = __ BytecodeOperandIdx(0);
@@ -1517,17 +1475,15 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfNullConstant(assembler);
 }
 
-
-// jumpifundefined <imm8>
+// JumpIfUndefined <imm8>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1540,8 +1496,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1557,13 +1512,44 @@
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfUndefinedConstant(assembler);
 }
 
+// JumpIfNotHole <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
+  DoJumpIfNotHoleConstant(assembler);
+}
 
 void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
-                                  compiler::InterpreterAssembler* assembler) {
+                                  InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1571,7 +1557,8 @@
   Node* flags_raw = __ BytecodeOperandImm(2);
   Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(function_id, closure, literal_index,
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, closure, literal_index,
                                 constant_elements, flags);
   __ SetAccumulator(result);
   __ Dispatch();
@@ -1582,8 +1569,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1592,8 +1578,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1602,8 +1587,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1612,8 +1596,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1622,8 +1605,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1632,8 +1614,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1642,15 +1623,16 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
   Node* tenured_raw = __ BytecodeOperandImm(1);
   Node* tenured = __ SmiTag(tenured_raw);
+  Node* context = __ GetContext();
   Node* result =
-      __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1660,8 +1642,7 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
   return DoCreateClosure(assembler);
 }
 
@@ -1669,10 +1650,11 @@
 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
-void Interpreter::DoCreateMappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1681,21 +1663,56 @@
 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
-void Interpreter::DoCreateUnmappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// CreateRestParameter
+//
+// Creates a new rest parameter array.
+void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// StackCheck
+//
+// Performs a stack guard check.
+void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
+  __ StackCheck();
+  __ Dispatch();
+}
 
 // Throw
 //
 // Throws the exception in the accumulator.
-void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoThrow(InterpreterAssembler* assembler) {
   Node* exception = __ GetAccumulator();
-  __ CallRuntime(Runtime::kThrow, exception);
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kThrow, context, exception);
+  // We shouldn't ever return from a throw.
+  __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
+// ReThrow
+//
+// Re-throws the exception in the accumulator.
+void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
+  Node* exception = __ GetAccumulator();
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   __ Abort(kUnexpectedReturnFromThrow);
 }
@@ -1704,59 +1721,105 @@
 // Return
 //
 // Return the value in the accumulator.
-void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
-  __ Return();
+void Interpreter::DoReturn(InterpreterAssembler* assembler) {
+  __ InterpreterReturn();
 }
 
+// Debugger
+//
+// Call runtime to handle debugger statement.
+void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+  __ Dispatch();
+}
 
-// ForInPrepare <cache_type> <cache_array> <cache_length>
+// DebugBreak
+//
+// Call runtime to handle a debug break.
+#define DEBUG_BREAK(Name, ...)                                              \
+  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
+    Node* context = __ GetContext();                                        \
+    Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
+    __ DispatchToBytecodeHandler(original_handler);                         \
+  }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
+
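
The DEBUG_BREAK handlers generated above follow a pattern worth spelling out: a DebugBreak* bytecode stands in for some original bytecode, so its handler calls into the debugger and then dispatches to the handler of the original bytecode returned by that runtime call, instead of advancing to the next bytecode. A standalone toy of that control flow (not V8 code; the function names are made up):

    #include <cstdio>

    using Handler = void (*)();

    void HandleAdd() { std::printf("original Add handler runs\n"); }

    // Stand-in for Runtime::kDebugBreak: notify the debugger, then hand back
    // the handler of the bytecode that the DebugBreak* bytecode replaced.
    Handler DebugBreakRuntime() {
      std::printf("debugger notified\n");
      return &HandleAdd;
    }

    // Analogous to the generated Do<DebugBreak*> handlers: call the runtime,
    // then re-dispatch to the original handler (DispatchToBytecodeHandler).
    void HandleDebugBreak() {
      Handler original_handler = DebugBreakRuntime();
      original_handler();
    }

    int main() { HandleDebugBreak(); }
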
+// ForInPrepare <cache_info_triple>
 //
 // Returns state for for..in loop execution based on the object in the
-// accumulator. The registers |cache_type|, |cache_array|, and
-// |cache_length| represent output parameters.
-void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
   Node* object = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+  Node* context = __ GetContext();
+  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
+
+  // Set output registers:
+  //   0 == cache_type, 1 == cache_array, 2 == cache_length
+  Node* output_register = __ BytecodeOperandReg(0);
   for (int i = 0; i < 3; i++) {
-    // 0 == cache_type, 1 == cache_array, 2 == cache_length
-    Node* cache_info = __ LoadFixedArrayElement(result, i);
-    Node* cache_info_reg = __ BytecodeOperandReg(i);
-    __ StoreRegister(cache_info, cache_info_reg);
+    Node* cache_info = __ Projection(i, result_triple);
+    __ StoreRegister(cache_info, output_register);
+    output_register = __ NextRegister(output_register);
   }
+  __ Dispatch();
+}
+
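
The loop above is the interesting part of the new DoForInPrepare: the runtime call yields three values, and they are stored into three consecutive registers starting at the single |cache_info_triple| operand. A standalone toy of that write-back (toy register file and plain ints, not V8 types):

    #include <array>
    #include <cstdio>

    // What Runtime::kForInPrepare conceptually returns.
    struct Triple { int cache_type, cache_array, cache_length; };

    // Stand-in for the Projection(i, result_triple) accessor used above.
    int Projection(int i, const Triple& t) {
      return i == 0 ? t.cache_type : i == 1 ? t.cache_array : t.cache_length;
    }

    int main() {
      std::array<int, 8> registers{};  // toy register file
      int output_register = 3;         // value of the cache_info_triple operand
      Triple result_triple{7, 42, 3};
      for (int i = 0; i < 3; i++) {
        registers[output_register] = Projection(i, result_triple);
        output_register = output_register + 1;  // NextRegister
      }
      // cache_type, cache_array, cache_length land in registers 3, 4, 5.
      std::printf("%d %d %d\n", registers[3], registers[4], registers[5]);
    }
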
+
+// ForInPrepareWide <cache_info_triple>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
+  DoForInPrepare(assembler);
+}
+
+
+// ForInNext <receiver> <index> <cache_info_pair>
+//
+// Returns the next enumerable property in the accumulator.
+void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
+  Node* receiver_reg = __ BytecodeOperandReg(0);
+  Node* receiver = __ LoadRegister(receiver_reg);
+  Node* index_reg = __ BytecodeOperandReg(1);
+  Node* index = __ LoadRegister(index_reg);
+  Node* cache_type_reg = __ BytecodeOperandReg(2);
+  Node* cache_type = __ LoadRegister(cache_type_reg);
+  Node* cache_array_reg = __ NextRegister(cache_type_reg);
+  Node* cache_array = __ LoadRegister(cache_array_reg);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
+                                cache_array, cache_type, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
 
-// ForInNext <receiver> <cache_type> <cache_array> <index>
+// ForInNextWide <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the accumulator.
-void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
-  Node* receiver_reg = __ BytecodeOperandReg(0);
-  Node* receiver = __ LoadRegister(receiver_reg);
-  Node* cache_type_reg = __ BytecodeOperandReg(1);
-  Node* cache_type = __ LoadRegister(cache_type_reg);
-  Node* cache_array_reg = __ BytecodeOperandReg(2);
-  Node* cache_array = __ LoadRegister(cache_array_reg);
-  Node* index_reg = __ BytecodeOperandReg(3);
-  Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
-                                cache_type, index);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
+  return DoForInNext(assembler);
 }
 
 
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);
-  Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1766,11 +1829,12 @@
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
-void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInStep, index);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
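
Taken together, ForInPrepare, ForInNext, ForInDone and ForInStep implement the for..in protocol: prepare the enumeration cache once, then repeatedly test for completion, fetch the next key, and bump the index. A standalone toy driver of that shape for a loop like `for (key in obj)` (it ignores the cache_type/map check and the deleted-property filtering that the real ForInNext performs):

    #include <cstdio>
    #include <string>
    #include <vector>

    // What ForInPrepare conceptually materializes: the enumerable keys and
    // their count (cache_array and cache_length).
    struct ForInState {
      std::vector<std::string> cache_array;
      int cache_length;
    };

    int main() {
      ForInState state{{"a", "b", "c"}, 3};          // ForInPrepare
      int index = 0;                                 // loop counter register
      while (true) {
        bool done = (index == state.cache_length);   // ForInDone
        if (done) break;
        const std::string& key = state.cache_array[index];  // ForInNext
        std::printf("key: %s\n", key.c_str());       // loop body
        index = index + 1;                           // ForInStep
      }
    }
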